Dataset schema (field name, type, size range):
  query            string (length 9 to 9.05k)
  document         string (length 10 to 222k)
  metadata         dict
  negatives        list (30 items)
  negative_scores  list (30 items)
  document_score   string (length 4 to 10)
  document_rank    string (2 distinct values)
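The fields follow the usual query/document/hard-negatives layout of a retrieval-training corpus: one natural-language query, one positive code document, thirty negative code snippets, and their similarity scores. A minimal loading sketch, assuming the rows are published as a Hugging Face dataset; the repository id below is a placeholder, not the actual name:

from datasets import load_dataset

# Hypothetical repository id -- substitute the real dataset path.
ds = load_dataset("username/code-retrieval-triplets", split="train")
row = ds[0]
print(row["query"])                # natural-language description of the code
print(row["document"][:200])       # the positive (matching) code snippet
print(len(row["negatives"]))       # 30 hard-negative snippets per row
print(row["negative_scores"][:3])  # retrieval scores for those negatives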
Removes alignments from ``alignment_infos`` that have substantially lower Model 4 scores than the best alignment
def prune(self, alignment_infos):
    alignments = []
    best_score = 0

    for alignment_info in alignment_infos:
        score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self)
        best_score = max(score, best_score)
        alignments.append((alignment_info, score))

    threshold = IBMModel5.MIN_SCORE_FACTOR * best_score
    alignments = [a[0] for a in alignments if a[1] > threshold]
    return set(alignments)
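This is the pruning step in NLTK's IBM Model 5 training: sampled alignments are scored with the cheaper Model 4 probability, and anything far below the best candidate is dropped before the more expensive Model 5 rescoring. A standalone sketch of the same rule on plain (candidate, score) pairs; the 0.2 factor and the toy scores are illustrative stand-ins for IBMModel5.MIN_SCORE_FACTOR and real alignment probabilities:

MIN_SCORE_FACTOR = 0.2  # stand-in for IBMModel5.MIN_SCORE_FACTOR

def prune_candidates(scored_candidates):
    # Keep only candidates whose score exceeds a fixed fraction of the best score.
    best_score = max((score for _, score in scored_candidates), default=0)
    threshold = MIN_SCORE_FACTOR * best_score
    return {cand for cand, score in scored_candidates if score > threshold}

candidates = [("a1", 1e-9), ("a2", 4e-10), ("a3", 5e-12)]
print(prune_candidates(candidates))  # "a3" falls below 0.2 * 1e-9 and is dropped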
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hillclimb(self, alignment_info, j_pegged=None):\n alignment = alignment_info # alias with shorter name\n max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self)\n\n while True:\n old_alignment = alignment\n for neighbor_alignment in self.neighboring(alignment, j_pegged):\n neighbor_probability = IBMModel4.model4_prob_t_a_given_s(\n neighbor_alignment, self\n )\n\n if neighbor_probability > max_probability:\n alignment = neighbor_alignment\n max_probability = neighbor_probability\n\n if alignment == old_alignment:\n # Until there are no better alignments\n break\n\n alignment.score = max_probability\n return alignment", "def condenseGappyAlignment(a, thresh=0.9):\n\n a = padAlignment(a)\n smat = align2mat(a)\n gapSiteInd = np.mean(smat == b'-', axis=0) >= thresh\n keepSeqInd = np.all(smat[:, gapSiteInd] == b'-', axis=1)\n print('Removing %d of %d sites and %d of %d sequences from the alignment.' % (gapSiteInd.sum(), smat.shape[1], (~keepSeqInd).sum(), smat.shape[0]))\n\n smat = smat[keepSeqInd,:]\n smat = smat[:, ~gapSiteInd]\n \n return seqmat2align(smat, index=a.index[keepSeqInd])", "def remove_outliers(seqs, num_stds, fraction_seqs_for_stats=.95):\r\n # load the alignment and compute the consensus sequence\r\n aln = Alignment.from_fasta_records(parse_fasta(seqs), DNA)\r\n consensus_seq = aln.majority_consensus()\r\n # compute the hamming distance between all sequences in the alignment\r\n # and the consensus sequence\r\n dists_to_consensus = [s.distance(consensus_seq) for s in aln]\r\n # compute the average and standard deviation distance from the consensus\r\n average_distance = mean(dists_to_consensus)\r\n std_distance = std(dists_to_consensus)\r\n # compute the distance cutoff\r\n dist_cutoff = average_distance + num_stds * std_distance\r\n # for all sequences, determine if they're distance to the consensus\r\n # is less then or equal to the cutoff distance. 
if so, add the sequence's\r\n # identifier to the list of sequence identifiers to keep\r\n seqs_to_keep = []\r\n for seq_id, dist_to_consensus in izip(aln.ids(), dists_to_consensus):\r\n if dist_to_consensus <= dist_cutoff:\r\n seqs_to_keep.append(seq_id)\r\n # filter the alignment to only keep the sequences identified in the step\r\n # above\r\n filtered_aln = aln.subalignment(seqs_to_keep=seqs_to_keep)\r\n # and return the filtered alignment\r\n return filtered_aln", "def remove_outliers(seqs, num_sigmas, fraction_seqs_for_stats=.95):\n aln = DenseAlignment(data=seqs, MolType=DNA)\n cons = DenseAlignment(data=aln.majorityConsensus(), MolType=DNA)\n diff_mtx = cons.SeqData[:,0] != aln.SeqData\n \n # consider only a fraction of seqs for mean, std\n seq_diffs = diff_mtx.sum(1)\n num_to_consider = round(len(seq_diffs)*fraction_seqs_for_stats)\n seq_diffs_considered_sorted = \\\n seq_diffs[seq_diffs.argsort()[:num_to_consider]]\n diff_cutoff = seq_diffs_considered_sorted.mean() + \\\n num_sigmas*seq_diffs_considered_sorted.std()\n # mean + e.g.: 4 sigma\n seq_idxs_to_keep = numpy.arange(len(seq_diffs))[seq_diffs <= diff_cutoff]\n \n filtered_aln = aln.getSubAlignment(seq_idxs_to_keep)\n return filtered_aln", "def filter_samfile(temp_alignment, filtered_out):\n # Check the quality and status of each aligned fragment.\n # Write the ones with good quality in the final output file.\n # Keep those that do not map unambiguously for the next round.\n\n unaligned = set()\n temp_sam = ps.AlignmentFile(temp_alignment, \"r\")\n outf = ps.AlignmentFile(filtered_out, \"w\", template=temp_sam)\n for r in temp_sam:\n if r.flag in [0, 16] and r.mapping_quality >= 30:\n outf.write(r)\n else:\n unaligned.add(r.query_name)\n\n print(\"{0} reads left to map.\".format(len(unaligned)))\n temp_sam.close()\n outf.close()\n\n return unaligned", "def remove_gapped_columns(aln):\n cols = zip(* aln.values())\n ind = util.find(lambda col: \"-\" not in col, cols)\n return subalign(aln, ind)", "def CleanUp(self):\n for Ind in self.IndList():\n if amax(abs(self[Ind]))<1e-10:\n del self[Ind]", "def test_align_without_gaps(self):\n aln = ArrayAlignment(\n {\"seq1\": \"ACGG\", \"seq2\": \"CGCA\", \"seq3\": \"CCG-\"}, moltype=\"dna\"\n )\n aln_plot = aln.dotplot(\"seq1\")\n self.assertNotEqual(aln_plot._aligned_coords, None)", "def test_alignments(self):\n # test against the correct input file\n parser = Lav(self.__correct_file)\n for alignment in parser.alignments():\n self.assertEqual(len(alignment), 7)\n for alignment in parser.alignments(gapped=False):\n self.assertEqual(len(alignment), 8)\n # test againts incorrect input files\n for lav_file in self.__incorrect_files:\n parser = Lav(os.path.join(self.__incorrect_file_dir,\n lav_file))\n with self.assertRaises(LavError):\n for alignment in parser.alignments():\n self.assertIsInstance(alignment,\n Lav.GapFreeAlignment)", "def eliminate(self):\n deleteKey = []\n for key,value in self._sets[self._currentSet].items():\n if value < self._minSupport:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._sets[self._currentSet][key]", "def downsamplealignment(keeplist, alignment, newalignmentname):\n newalignmentwrite = open(newalignmentname, 'w')\n with open(alignment, 'r') as alignmentfile:\n WRITENOW=False\n for line in alignmentfile.readlines():\n if WRITENOW==False and '>' in line and line.split('>')[1].strip() in keeplist:\n newalignmentwrite.write(line)\n WRITENOW=True\n elif WRITENOW==True:\n newalignmentwrite.write(line)\n WRITENOW=False", "def find(self) -> 
bool:\n alignments = []\n for sw_idx in range(len(self.sw)):\n for nu_idx in range(len(self.nu)):\n alignments.append(Alignment(self.nu, self.sw, nu_idx, sw_idx, self.orig_nu))\n alignment = max(alignments, key=lambda align: align.score)\n if alignment.score > 0:\n self.alignment = alignment\n return True\n return False", "def test_EstimateDistances_fromUnaligned(self):\n d = EstimateDistances(self.collection, JC69(), do_pair_align=True,\n rigorous_align=True)\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)\n \n d = EstimateDistances(self.collection, JC69(), do_pair_align=True,\n rigorous_align=False)\n d.run()\n canned_result = {('b', 'e'): 0.440840,\n ('c', 'e'): 0.440840,\n ('a', 'c'): 0.088337,\n ('a', 'b'): 0.188486,\n ('a', 'e'): 0.440840,\n ('b', 'c'): 0.0883373}\n result = d.getPairwiseDistances()\n self.assertDistsAlmostEqual(canned_result, result)", "def eliminateRules(self):\n deleteKey = []\n for key,value in self._rules.items():\n if value[0] < self._minConfidence:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._rules[key]", "def preprocess_matches(matches):\n good_matches = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good_matches.append(m)\n\n return good_matches", "def remove_low_quality_for_matched(matches, read_count, phreds, min_phred_score, ditched_f=None):\n\tcount = count_unique = 0\n\tkk = matches.keys()\n\tfor k in kk:\n\t\tm = matches[k]\n\t\tif any( x < min_phred_score for x in phreds[m.read.tostring()] ):\n\t\t\tcount += read_count[m.read.tostring()]\n\t\t\tcount_unique += 1\n\t\t\tif ditched_f is not None:\n\t\t\t\tditched_f.write(\"@{id}\\n{seq}\\n+{id}\\n{qual}\\n\".format( id=k, seq=m.read, \\\n\t\t\t\t\tqual=m.quality ))\n\t\t\tdel matches[k]\n\t\t\tdel read_count[m.read.tostring()]\n\t\t\tdel phreds[m.read.tostring()]\n\treturn count, count_unique", "def remove_cds_and_remap_reads(self, cds_aln):\n super(GreedySolver, self).remove_cds_and_remap_reads(cds_aln)\n # Dictionary where key is read_id and value is cds alignment to which it maps.\n # If it does not map to any cds alignment then value is None.\n new_read_mappings = {}\n\n for aln_reg in cds_aln.aligned_regions.values():\n if aln_reg.active:\n # Find alternative cds alignment with highest coverage\n best_alt_cds_aln = None\n for alt_cds_aln in self._cds_aln_container.read2cds[aln_reg.read_id]:\n if best_alt_cds_aln == None or self._get_coverage(alt_cds_aln) > self._get_coverage(best_alt_cds_aln): \n best_alt_cds_aln = alt_cds_aln\n # Activate it in best alternative cds alignment (if there is one)\n if (best_alt_cds_aln != None):\n best_alt_cds_aln.aligned_regions[aln_reg.read_id].active = True\n # Add mapping to output dictionary\n new_read_mappings[aln_reg.read_id] = best_alt_cds_aln\n\n # Delete original cds alignment\n del self._cds_aln_container.cds_repository[cds_aln.cds]\n # Remove original cds alignment from read2cds\n for cds_alns in self._cds_aln_container.read2cds.values():\n if cds_aln in cds_alns: cds_alns.remove(cds_aln)\n\n # Force recalculation of coverage for updated cds alignments by forgeting coverage\n for updated_cds_aln in set(filter(lambda x: x != None, new_read_mappings.values())):\n del self._coverages[updated_cds_aln]\n\n return new_read_mappings", "def filter_four_fold(aln):\n\n aln_codons = filter_aligned_codons(aln)\n ind = 
find_four_fold(aln_codons)\n return subalign(aln_codons, ind)", "def test_remove_outliers(self):\r\n aln = [\r\n '>ACT009', 'ACAT-',\r\n '>ACT019', 'GACT-',\r\n '>ACT_02', 'GACT-',\r\n '>ACT_03', 'AACT-',\r\n '>ACT_04', 'AACT-',\r\n '>ACT_05', 'AACT-',\r\n '>ACT011', 'CTGGC',\r\n '>hello', 'AACTG',\r\n ]\r\n # mean errors is 10/9 .\r\n seqnames = []\r\n for elem in aln:\r\n if elem.startswith('>'):\r\n seqnames.append(elem[1:])\r\n seqs = []\r\n for elem in aln:\r\n if not elem.startswith('>'):\r\n seqs.append(elem)\r\n\r\n # just remove ACT011\r\n res = remove_outliers(aln, 2)\r\n self.assertEqual(res.sequence_count(), 7)\r\n for seqname_left in res.ids():\r\n self.assertTrue(seqname_left in seqnames)\r\n self.assertTrue('ACT011' not in res.ids())\r\n\r\n # now remove all that deviate have > 10/9 (2 or more) substitutions:\r\n res = remove_outliers(aln, 0)\r\n self.assertEqual(res.sequence_count(), 6)\r\n for seqname_left in res.ids():\r\n self.assertTrue(seqname_left in seqnames)\r\n self.assertTrue('ACT011' not in res.ids())\r\n self.assertTrue('ACT009' not in res.ids())", "def sample(self, sentence_pair):\n sampled_alignments, best_alignment = super().sample(sentence_pair)\n return self.prune(sampled_alignments), best_alignment", "def filter_aligned_codons(aln):\n\n ind = find_aligned_codons(aln)\n return subalign(aln, ind)", "def checkSwapsAndClean( self, # For comparison the NRG tags and defaults on March 2nd, 2011 are presented.\n energy_abs_criterium = 0.1, # _Stereo_assign_list.Crit_abs_e_diff 0.100\n energy_rel_criterium = 0.0, # _Stereo_assign_list.Crit_rel_e_diff 0.000\n swapPercentage = 75.0, # _Stereo_assign_list.Crit_mdls_favor_pct 75.0\n singleModelCutoff = 1.0, # _Stereo_assign_list.Crit_sing_mdl_viol 1.000 (inclusive)\n multiModelCutoff = 0.5, # _Stereo_assign_list.Crit_multi_mdl_viol 0.500 (inclusive)\n multiModelPercentageCutoff = 50.0, # _Stereo_assign_list.Crit_multi_mdl_pct 50.0 (inclusive)\n method = 'SUM_AVERAGING', # TODO: code others.\n outputFileName = 'stereo_assign.str', # will be written to current directory if not an absolute path. Ignored if output type is custom\n debug = False, # Print debug info?\n useLowestAromaticViolation = False, # Check for lowest violation for single HD1/2 HE1/2 distance constraint items\n outputType = 'NMRSTAR' # Will write out NMR-STAR file. Can also be 'custom', will then only print info\n ):\n if not self.distanceConstraintLists or not self.structureEnsemble or not self.structureEnsemble.models:\n print \"Error: no constraint lists or no structures available! Aborting...\"\n return True\n\n #\n # Initialize... 
see parameters above for swapPercentage\n #\n # Set a dictionary with violationCodes (what is a large violation?)\n #\n # smallFloat = 0.000000000001 # same for cutoff distance and fraction\n\n negativeFraction = -999.9 # fraction set to always happen as it's under cut off.\n\n self.violationCodes = {}\n self.violationCodes['xl'] = {'violation': singleModelCutoff, 'fraction': negativeFraction}\n self.violationCodes['l'] = {'violation': multiModelCutoff, 'fraction': multiModelPercentageCutoff/100.}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGX_STR] = {'violation': singleModelCutoff, 'fraction': negativeFraction}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGL_STR] = {'violation': multiModelCutoff, 'fraction': negativeFraction}\n self.violationCodes[self.VIOLATION_CODE_REPORTINGS_STR] = {'violation': 0.0, 'fraction': negativeFraction}\n\n\n # JFD changed indentation here so that below statement is always executed.\n # Order in which they are checked, if found will abort so xl violation is prioritized\n self.violationCodeList = ['xl','l',\n self.VIOLATION_CODE_REPORTINGS_STR,\n self.VIOLATION_CODE_REPORTINGL_STR,\n self.VIOLATION_CODE_REPORTINGX_STR ]\n for violationCode in self.violationCodeList:\n if not self.violationCodes.has_key(violationCode):\n print 'ERROR: expected violationCode [%s] in StereoAssignmentCleanup.violationCodes ' % violationCode\n return True\n# print 'DEBUG: self.violationCode[%s] : %s' % ( violationCode, str(self.violationCodes[violationCode]))\n\n #\n # Initialise some variables\n #\n\n self.useLowestAromaticViolation = useLowestAromaticViolation\n\n #\n # Set the factor for calculating violations\n #\n\n self.method = method\n if self.method == 'SUM_AVERAGING':\n self.factor = 1.0/6.0\n\n #\n # Initialise resonance and 'triplet' information\n #\n\n print\n print \"Checking swap status and cleaning prochiral groups in constraint lists...\"\n print\n\n (self.resAtomDict,self.resAtomSetDict) = createResonanceAtomAndAtomSetDict(self.distanceConstraintLists[0].parent.fixedResonances)\n if self.verbose:\n print \"Made resAtomDict, resAtomSetDict\"\n\n # resAtomSwapDict is list of atoms associated with a resonance, prochiralResonancesDict links to (chainCode,seqId,prochiralChemAtomSet) tuple\n (self.resAtomSwapDict,self.prochiralResonancesDict) = createResAtomSwapDict(self.resAtomSetDict,compareWithWattos=self.compareWithWattos)\n if self.verbose:\n print \"Made resAtomSwapDict,prochiralResonancesDict\"\n\n self.triplets = {}\n\n # Generate a list of triplets, only for ones that have resonances - rest is dealt with later on.\n resList = self.prochiralResonancesDict.keys()\n resList.sort()\n\n for res in resList:\n atomTuple = self.resAtomDict[res]\n prochiralKey = self.prochiralResonancesDict[res]\n\n if not self.triplets.has_key(prochiralKey):\n self.triplets[prochiralKey] = {}\n\n if not self.triplets[prochiralKey].has_key(atomTuple):\n self.triplets[prochiralKey][atomTuple] = []\n\n self.triplets[prochiralKey][atomTuple].append(res)\n\n #\n # Now prioritise the triplets...\n #\n\n prochiralPriority = {}\n self.prochiralConstraints = {}\n\n prochiralKeys = self.triplets.keys()\n prochiralKeys.sort()\n Triplet_count = len(prochiralKeys)\n if Triplet_count < 1:\n print \"WARNING: expected at least one triplet. 
Are there SSA distance restraints available?\"\n return\n invalidTripletCount = 0 # Like 1a24 1 185 LEU CD* that is invalid and can easily be recognized because it gets no involved restraints.\n for prochiralKey in prochiralKeys:\n #print prochiralKey\n atomTuples = self.triplets[prochiralKey].keys()\n atomTuples.sort()\n connectedConstraints = []\n unambiguousStereoConstraints = [] # These are constraints where there is no additional stereo ambiguity in the constraint items involving the prochiral\n allResonancesSet = set()\n\n otherItems = {}\n\n for atomTuple in atomTuples:\n #print \"\",atomTuple,triplets[prochiralKey][atomTuple]\n for resonance in self.triplets[prochiralKey][atomTuple]:\n allResonancesSet.add(resonance) # Note will not add the same item twice, so this is fine!\n for constraintItem in resonance.pairwiseConstraintItems:\n constraint = constraintItem.constraint\n if not otherItems.has_key(constraint):\n otherItems[constraint] = {}\n\n # Track other resonance in the item for filtering out fully ambiguous restraints\n orderedResonances = list(constraintItem.orderedResonances)\n otherResonance = orderedResonances[not orderedResonances.index(resonance)]\n if otherResonance not in otherItems[constraint]: # Use this now for future Python3 compatibility\n otherItems[constraint][otherResonance] = set()\n otherItems[constraint][otherResonance].add(resonance)\n\n if constraint.className in ('DistanceConstraint','HBondConstraint'):\n if constraint not in connectedConstraints:\n connectedConstraints.append(constraint)\n # So only 'unambiguous' if the 'other' resonance in the item has a resonance assignment, is assigned to one atomSet, and is prochiral (so could be deassigned)\n if otherResonance.resonanceSet and len(otherResonance.resonanceSet.atomSets) == 1 and otherResonance in self.prochiralResonancesDict:\n #if self.resAtomDict[resonance][0].residue.seqId == 48:\n # print self.resAtomDict[resonance], self.resAtomDict[otherResonance], otherResonance.resonanceSet.atomSets\n unambiguousStereoConstraints.append(constraint)\n else:\n pass\n# print 'DEBUG: ambi in %s:\\n %s' % (prochiralKey, ccpnDistanceRestraintToString(constraint)) # JFD doesn't know how to easily show atoms here.\n\n #\n # Clean up restraints so that constraints that are already fully ambiguous for the prochiral resonances (and they point to exactly the same resonances) are not included in the list to check..\n #\n\n if len(allResonancesSet) > 1:\n for constraint in otherItems:\n allMatch = True\n for otherResonance in otherItems[constraint]:\n if allResonancesSet != otherItems[constraint][otherResonance]:\n allMatch = False\n break\n\n if allMatch:\n if constraint in connectedConstraints:\n connectedConstraints.pop(connectedConstraints.index(constraint))\n if constraint in unambiguousStereoConstraints:\n unambiguousStereoConstraints.pop(unambiguousStereoConstraints.index(constraint))\n\n #\n # Set their priority\n #\n\n chainIdCcpn = prochiralKey[0]\n resIdCcpn = prochiralKey[1]\n chemAtomSetName = prochiralKey[2].name\n priorityKey = (len(connectedConstraints),len(unambiguousStereoConstraints),chainIdCcpn,resIdCcpn,chemAtomSetName)\n# print \"DEBUG: priorityKey:\", priorityKey\n if not prochiralPriority.has_key(priorityKey):\n prochiralPriority[priorityKey] = []\n\n prochiralPriority[priorityKey].append(prochiralKey)\n\n connectedConstraints.sort()\n self.prochiralConstraints[prochiralKey] = connectedConstraints\n\n \n #\n # Sort by priority and reorganise...\n #\n \n priorityKeys = prochiralPriority.keys()\n\n 
## custom sort needs to return an int.\n def tripletComparator(x, y):\n if x[0] != y[0]:\n return x[0] - y[0] # ascending connectedConstraints\n# if not self.compareWithWattos:\n if x[1] != y[1]:\n return y[1] - x[1] # ascending unambiguousStereoConstraints\n if x[2] != y[2]:\n if x[2] < y[2]: # descending chainIdCcpn character\n return 1\n else:\n return -1\n resIdX = int(x[3])\n resIdY = int(y[3])\n if resIdX != resIdY:\n return resIdY - resIdX # descending resIdCcpn\n if x[4] != y[4]:\n if x[4] < y[4]: # descending chemAtomSetName\n return 1\n else:\n return -1\n return 0\n # end def\n\n priorityKeys.sort(cmp=tripletComparator)\n priorityKeys.reverse()\n\n if debug:\n for pk in priorityKeys:\n for pck in prochiralPriority[pk]:\n print \"pck: \", pck\n for at in self.triplets[pck].keys():\n print \" at, self.triplets[pck][at]: \",at, self.triplets[pck][at]\n print\n\n #\n # Now calculate the total 'energy' for each constraint, and track whether there are any serious violations\n #\n # The 'energy' is the sum of the squared violations (over all models and restraints).\n #\n\n self.createAtomCoordDict() # This is static, fine to keep like this!\n\n # Corresponds to the indexes of avgLocalSums\n\n self.swapTypes = [self.SWAP_TYPE_ORG,'swapped']\n self.constraintItemsReset = []\n\n #\n # First only do swapping...\n #\n\n swapInfo = {}\n orgMaxViolation = {}\n orgViolationSingleModelCriteriumCount = {}\n orgViolationMultiModelCriteriumCount = {}\n\n Swap_count = 0 # Using captial to distinguish from original FC and use exact same as Wattos.\n Deassign_count = 0\n Total_e_low_states = 0.0\n Total_e_high_states = 0.0\n tripletIdx = 0\n for priorityKey in priorityKeys:\n for prochiralKey in prochiralPriority[priorityKey]:\n tripletIdx += 1\n if debug:\n print prochiralKey\n\n (prochiralViolationInfo,allConstraintItems) = self.checkProchiralKeyConstraints(prochiralKey,debug)\n\n # Find max violation of original assignment\n orgMaxViolation[ prochiralKey] = 0.0\n orgViolationSingleModelCriteriumCount[ prochiralKey] = 0\n orgViolationMultiModelCriteriumCount[ prochiralKey] = 0\n violResultTupleList = prochiralViolationInfo[self.SWAP_TYPE_ORG][self.REQUIRES_DEASSIGNMENT_STR]\n for violationCode, violationList in violResultTupleList:\n if violationCode == self.VIOLATION_CODE_REPORTINGS_STR: # Includes any possible violation.\n orgMaxViolation[prochiralKey] = max( orgMaxViolation[prochiralKey], max(violationList)) # a list of violations\n elif violationCode == self.VIOLATION_CODE_REPORTINGX_STR:\n orgViolationSingleModelCriteriumCount[prochiralKey] += self.numModels - violationList.count(0.0)\n elif violationCode == self.VIOLATION_CODE_REPORTINGL_STR:\n orgViolationMultiModelCriteriumCount[prochiralKey] += self.numModels - violationList.count(0.0)\n # end for violation results\n\n #\n # Now check whether needs to be swapped\n #\n\n doSwapCount = 0.0\n totalEnergyHighState = 0.0 # actual high state will be determined after next loop. For now assume state 0 (unswapped)\n totalEnergyLowState = 0.0\n for modelIndex in range(self.numModels):\n energyHighState = prochiralViolationInfo[self.swapTypes[0]]['energy'][modelIndex]\n energyLowState = prochiralViolationInfo[self.swapTypes[1]]['energy'][modelIndex]\n\n# totalEnergyDiff = prochiralViolationInfo[self.swapTypes[0]]['energy'][modelIndex] - prochiralViolationInfo[self.swapTypes[1]]['energy'][modelIndex] # this is a bug? 
Needs to be cumulative over models.\n totalEnergyHighState += energyHighState\n totalEnergyLowState += energyLowState\n if energyHighState > energyLowState: # swapping needed because for this model the assumption on the unswapped being the highest energy state was correct\n doSwapCount += 1.0\n# print \"DEBUG: tripletIdx,modelIndex,energyHighState,energyLowState: %s\" % str((tripletIdx,modelIndex,energyHighState,energyLowState))\n # end for model loop\n swappedFavouredFraction = doSwapCount / self.numModels\n\n # Adapted from Wattos\n totalEnergyHighState /= self.numModels # For criteria it's important to use one that can be compared over entries. Ensemble size should not influence result.\n totalEnergyLowState /= self.numModels\n if totalEnergyHighState < totalEnergyLowState: # Get this right before deciding on swapping.\n tmpEnergy = totalEnergyHighState\n totalEnergyHighState = totalEnergyLowState\n totalEnergyLowState = tmpEnergy\n # end if\n energyDifference = totalEnergyHighState - totalEnergyLowState # guaranteed positive or zero\n totalEnergyDiff = energyDifference # FC name\n percentageModelFavoured = 100.0 * swappedFavouredFraction\n if totalEnergyHighState > 0.0: # Strange in Wattos code there's no safety on totalEnergyHighState being zero. Added here.\n energyDifferencePercentage = 100.0 * energyDifference / totalEnergyHighState\n else:\n energyDifferencePercentage = 0.0\n if energyDifference > 0.0:\n energyDifferencePercentage = 100.0\n # end if/else\n\n # If any criteria is not met then the assignment will be maintained.\n swapAssignment = False\n if totalEnergyHighState <= totalEnergyLowState:\n msg = \"criterium not met: totalEnergyHighState > totalEnergyLowState: %.3f and %.3f\" % ( totalEnergyHighState, totalEnergyLowState )\n elif percentageModelFavoured < swapPercentage:\n msg = \"criterium not met: percentageModelFavoured >= swapPercentage: %.1f %.1f\" % ( percentageModelFavoured, swapPercentage)\n elif energyDifference < energy_abs_criterium: # If diff is close to zero do nothing.\n msg = \"criterium not met: energyDifference >= energy_abs_criterium: %.3f and %.3f\" % ( energyDifference, energy_abs_criterium )\n elif energyDifferencePercentage < energy_rel_criterium:\n msg = \"criterium not met: energyDifferencePercentage >= energy_rel_criterium: %.1f %.1f\" % ( energyDifferencePercentage, energy_rel_criterium)\n else:\n swapAssignment = True\n # end if/else\n if not swapAssignment:\n print \"DEBUG maintaining tripletIdx %s because %s\" % ( tripletIdx, msg)\n else:\n print \"DEBUG swapping tripletIdx %s\" % tripletIdx\n # end if\n finalSwapType = self.swapTypes[0]\n favouredPercent = (1 - swappedFavouredFraction) * 100.0\n if swapAssignment:\n finalSwapType = self.swapTypes[1]\n favouredPercent = 100.0 - favouredPercent\n Swap_count += 1\n\n Total_e_low_states += totalEnergyLowState\n Total_e_high_states += totalEnergyHighState\n swapInfo[prochiralKey] = (swapAssignment,finalSwapType,energyDifferencePercentage,totalEnergyDiff, totalEnergyHighState, totalEnergyLowState,\n favouredPercent,swappedFavouredFraction,tripletIdx)\n\n\n #\n # Now make changes in CCPN... 
deassignment gets priority over swapping.\n #\n\n if swapAssignment:\n\n prochiralResonances = []\n for resList in self.triplets[prochiralKey].values():\n for resonance in resList:\n if not resonance in prochiralResonances:\n prochiralResonances.append(resonance)\n\n #\n # Switch the assignments...\n #\n \n if debug:\n print\n print \"SWAPPING\", prochiralResonances\n print\n\n if len(prochiralResonances) == 2:\n\n resSet1 = prochiralResonances[0].resonanceSet\n atomSet1 = resSet1.sortedAtomSets()[0]\n resSet2 = prochiralResonances[1].resonanceSet\n atomSet2 = resSet2.sortedAtomSets()[0]\n\n resSet1.addAtomSet(atomSet2)\n resSet1.removeAtomSet(atomSet1)\n resSet2.addAtomSet(atomSet1)\n resSet2.removeAtomSet(atomSet2)\n\n # Reset some dictionaries as well - note that resAtomSwapDict gives atoms of the *other* prochiral, so below is correct!\n atomTuple1 = tuple(atomSet1.sortedAtoms())\n atomTuple2 = tuple(atomSet2.sortedAtoms())\n\n self.resAtomSwapDict[prochiralResonances[0]] = atomTuple2\n self.resAtomSwapDict[prochiralResonances[1]] = atomTuple1\n\n # Reset triplets info\n self.triplets[prochiralKey] = {}\n self.triplets[prochiralKey][atomTuple1] = [prochiralResonances[1]]\n self.triplets[prochiralKey][atomTuple2] = [prochiralResonances[0]]\n\n elif len(prochiralResonances) == 1:\n resSet = prochiralResonances[0].resonanceSet\n atomSet1 = resSet.sortedAtomSets()[0]\n\n otherAtoms = self.resAtomSwapDict[prochiralResonances[0]]\n\n otherAtomSet = otherAtoms[0].findFirstFixedAtomSet(nmrConstraintStore=self.nmrConstraintStore)\n if not otherAtomSet:\n otherAtomSet = self.nmrConstraintStore.newFixedAtomSet(atoms = otherAtoms)\n \n if otherAtomSet != atomSet1:\n resSet.addAtomSet(otherAtomSet)\n atomSet1.removeResonanceSet(resSet)\n\n # Reset some dictionaries as well - note that resAtomSwapDict gives atoms of the *other* prochiral, so below is correct!\n atomTuple1 = tuple(atomSet1.sortedAtoms())\n \n else:\n # Same atomSet, possible for HD1/2 HE1/2 aromatics\n atomTuple1 = otherAtoms\n\n self.resAtomSwapDict[prochiralResonances[0]] = atomTuple1\n\n # Reset triplets info\n self.triplets[prochiralKey] = {}\n self.triplets[prochiralKey][atomTuple1] = []\n self.triplets[prochiralKey][otherAtomSet] = [prochiralResonances[0]]\n\n #\n # Then do deassigning. 
and track info for final printout...\n #\n\n finalList = {}\n\n self.swapTypes = [self.SWAP_TYPE_ORG] # Swapped not necessary any more\n priorityCount = 0\n\n for priorityKey in priorityKeys:\n priorityCount += 1\n for prochiralKey in prochiralPriority[priorityKey]:\n\n if debug:\n print prochiralKey\n\n (prochiralViolationInfo,allConstraintItems) = self.checkProchiralKeyConstraints(prochiralKey,debug=debug)\n\n #\n # Now check whether needs to be deassigned\n #\n\n finalSwapType = self.SWAP_TYPE_ORG\n\n numViol = {}\n deassign = False\n\n violResultTupleList = prochiralViolationInfo[finalSwapType][self.REQUIRES_DEASSIGNMENT_STR]\n for violationCodeToTest in self.violationCodeList:\n if violationCodeToTest in self.VIOLATION_CODE_REPORTING_LIST:\n continue\n if deassign:\n continue\n fractionByViolationCode = self.violationCodes[violationCodeToTest]['fraction']\n# numViol[violationCodeToTest] = 0\n for violationCode, violationList in violResultTupleList:\n if violationCodeToTest != violationCode:\n continue\n # Look for every violationCodeToTest (a large single model cutoff and a smaller multi model cutoff) if fraction is met.\n numViol = self.numModels - violationList.count(0.0)\n fractionFound = ( 1.0 * numViol ) / self.numModels\n if fractionFound >= fractionByViolationCode: # inclusive\n if debug:\n print \"DEBUG: DEASSIGNING BASED ON %s %s\" % (violationCode, str(prochiralViolationInfo[finalSwapType][self.REQUIRES_DEASSIGNMENT_STR]))\n deassign = True\n Deassign_count += 1\n break # no need to look at other potentially qualifying restraints\n # end for\n # end for violationCodeToTest\n\n # Retrieve the swap info...\n (swapAssignment,finalSwapType,energyDifferencePercentage,totalEnergyDiff, totalEnergyHighState, totalEnergyLowState,\n favouredPercent,swappedFavouredFraction,tripletIdx) = swapInfo[prochiralKey]\n\n chainCode = prochiralKey[0]\n seqId = prochiralKey[1]\n chemAtomSetName = prochiralKey[2].name\n ccpCode = prochiralKey[2].chemComp.ccpCode\n totalConstraints = priorityKey[0]\n ambiguousConstraints = priorityKey[1]\n\n maximum_violation = orgMaxViolation[ prochiralKey]\n violation_single_model_criterium_count = orgViolationSingleModelCriteriumCount[prochiralKey]\n violation_multi_model_criterium_count = orgViolationMultiModelCriteriumCount[ prochiralKey]\n\n # chainCode, seqId, ccpCode, chemAtomSetName, swapAssignment, favouredPercent, totalEnergyDiff, totalConstraints, unambiguousStereoConstraints, deassign, numVeryLargeViol, numLargeViol\n# dummyIdxForComparisonWithWattos = '1' # TODO: reset to sensible output. 
chainCode\n# mapChainId2Idx = { 'A': '1', 'B': '2', 'C': '3' }\n# if mapChainId2Idx.has_key(chainCode):\n# dummyIdxForComparisonWithWattos = mapChainId2Idx[chainCode]\n pseudoNameKey = '%s,%s' % (ccpCode.upper(), chemAtomSetName)\n iupacPseudo = chemAtomSetName\n if self.mapCcpn2IupacPseudo.has_key(pseudoNameKey):\n iupacPseudo = self.mapCcpn2IupacPseudo[ pseudoNameKey ]\n lineItem = \"%1s %4d %5s %-10s\" % ( chainCode, seqId, ccpCode.upper(), iupacPseudo )\n lineItem += \" %3d %-3s %7.1f %7.1f %6.1f\" % ( tripletIdx, booleanPythonToJavaStr(swapAssignment), favouredPercent, energyDifferencePercentage, totalEnergyDiff )\n lineItem += \" %6.1f %6.1f %3d\" % ( totalEnergyHighState, totalEnergyLowState, totalConstraints )\n lineItem += \" %3d\" % ( ambiguousConstraints )\n lineItem += \" %-5s %7.3f\" % ( booleanPythonToJavaStr(deassign), maximum_violation )\n lineItem += \" %3d %3d\" % ( violation_single_model_criterium_count, violation_multi_model_criterium_count)\n \n if totalConstraints:\n finalList[(chainCode,seqId,chemAtomSetName)] = lineItem\n else:\n print \"warning skipping triplet without restraints: \" + lineItem\n invalidTripletCount += 1\n # end if\n \n #\n # Now make changes in CCPN... deassignment gets priority over swapping.\n #\n\n\n if deassign:\n\n violationCode = 'xxx'\n fractionViolated = 0.00\n\n prochiralResonances = []\n for resList in self.triplets[prochiralKey].values():\n for resonance in resList:\n if not resonance in prochiralResonances:\n prochiralResonances.append(resonance)\n\n self.resetConstraintItems(allConstraintItems,prochiralResonances, prochiralKey,violationCode,fractionViolated,verbose=False)\n\n #\n # Print out for checking\n #\n \n if outputType == 'custom':\n \n print \"\"\"# Columns below (* means new):\n# 1 chainCode\n# 2 seqId\n# 3 ccpCode\n# 4 chemAtomSetName\n# 5 priority (1 was handled first)\n# 6 swapAssignment\n# 7 favouredPercent (so for the swapped state if swapped!)\n# 8 energyDifferencePercentage (*)\n# 9 totalEnergyDiff ensemble averaged\n# 10 totalEnergyHighState ensemble averaged (*)\n# 11 totalEnergyLowState ensemble averaged (*)\n# 12 totalConstraints\n# 13 ambiguousConstraints (optional)\n# 14 deassign\n# 15 maximumViolation (pre processing)\n# 16 numVeryLargeViol (post processing TODO: check)\n# 17 numLargeViol (post processing TODO: check)\n\"\"\"\n\n finalIds = finalList.keys()\n finalIds.sort()\n\n meat = ''\n\n for finalId in finalIds:\n if outputType == 'custom':\n print finalList[finalId]\n else:\n meat += str( finalList[finalId] ) + '\\n'\n\n \n #\n # NMR-STAR Wattos type output\n #\n # meat = \"\"\"\n # A 4 Met HB* 82 False 100.0 0.000 2 0 False 0.000 0 0\n # A 5 Arg HD* 81 False 100.0 0.000 4 2 False 0.000 0 0\n # A 6 Leu HB* 23 False 90.0 14.328 26 7 True 1.812 11 0\n #\n # 1 6 LEU QB 22 no 90.0 78.6 8.803 11.204 2.402 26 10 yes 2.200 11 11\n # 1 6 LEU QD 8 no 5.0 0.0 0.000 1.649 1.649 34 14 yes 1.651 19 22\n # 1 9 GLU QG 96 no 100.0 0.0 0.000 0.000 0.000 10 0 no 0.000 0 0\n #\"\"\"\n\n if outputType == 'NMRSTAR':\n\n # Let's do the same with a STAR table.\n if invalidTripletCount:\n print \"Warning: found triplets without restraints.\"\n validTripletCount = Triplet_count - invalidTripletCount\n if validTripletCount < 1:\n print \"Error: found no triplets with restraints.\"\n return True\n validTripletCount2 = len(finalIds) # double check.\n if validTripletCount != validTripletCount2:\n print \"Error: found number of triplets with restraints %d but number of report list %d\" % ( validTripletCount, validTripletCount2)\n# 
return True\n \n Swap_percentage = ( 100.0 * Swap_count ) / validTripletCount\n Deassign_percentage = ( 100.0 * Deassign_count ) / validTripletCount\n Model_count = self.numModels\n Crit_abs_e_diff = energy_abs_criterium\n Crit_rel_e_diff = energy_rel_criterium\n Crit_mdls_favor_pct = swapPercentage\n Crit_sing_mdl_viol = self.violationCodes['xl']['violation']\n Crit_multi_mdl_viol = self.violationCodes['l']['violation']\n Crit_multi_mdl_pct = self.violationCodes['l']['fraction'] * 100.0\n\n header = \"\"\"data_entry\n\n\n save_assign_stereo\n _Stereo_assign_list.Sf_category stereo_assignments\n _Stereo_assign_list.Triplet_count %s\n _Stereo_assign_list.Swap_count %s\n _Stereo_assign_list.Swap_percentage %.1f\n _Stereo_assign_list.Deassign_count %s\n _Stereo_assign_list.Deassign_percentage %.1f\n _Stereo_assign_list.Model_count %s\n _Stereo_assign_list.Total_e_low_states %.1f\n _Stereo_assign_list.Total_e_high_states %.1f\n _Stereo_assign_list.Crit_abs_e_diff %.1f\n _Stereo_assign_list.Crit_rel_e_diff %.1f\n _Stereo_assign_list.Crit_mdls_favor_pct %.1f\n _Stereo_assign_list.Crit_sing_mdl_viol %.3f\n _Stereo_assign_list.Crit_multi_mdl_viol %.3f\n _Stereo_assign_list.Crit_multi_mdl_pct %.1f\"\"\" % (\n validTripletCount,\n Swap_count,\n Swap_percentage,\n Deassign_count,\n Deassign_percentage,\n Model_count,\n Total_e_low_states,\n Total_e_high_states,\n Crit_abs_e_diff,\n Crit_rel_e_diff,\n Crit_mdls_favor_pct,\n Crit_sing_mdl_viol,\n Crit_multi_mdl_viol,\n Crit_multi_mdl_pct\n )\n\n\n explanations = \"\"\"\n _Stereo_assign_list.Details\n;\n\nDescription of the tags in this list:\n* 1 * NMR-STAR 3 administrative tag\n* 2 * NMR-STAR 3 administrative tag\n* 3 * NMR-STAR 3 administrative tag\n* 4 * Number of triplets (atom-group pair and pseudo)\n* 5 * Number of triplets that were swapped\n* 6 * Percentage of triplets that were swapped\n* 7 * Number of deassigned triplets\n* 8 * Percentage of deassigned triplets\n* 9 * Number of models in ensemble\n* 10 * Energy of the states with the lower energies summed for all triplets (Ang.**2) ensemble averaged\n* 11 * Energy of the states with the higher energies summed for all triplets (Ang.**2) ensemble averaged\n* 12 * Item 9-8\n* 13 * Criterium for swapping assignment on the absolute energy difference (Ang.**2)\n* 14 * Criterium for swapping assignment on the relative energy difference (Ang.**2)\n* 15 * Criterium for swapping assignment on the percentage of models favoring a swap\n* 16 * Criterium for deassignment on a single model violation (Ang.)\n* 17 * Criterium for deassignment on a multiple model violation (Ang.)\n* 18 * Criterium for deassignment on a percentage of models\n* 19 * this tag\n\nDescription of the tags in the table below:\n* 1 * Chain identifier (can be absent if none defined)\n* 2 * Residue number\n* 3 * Residue name\n* 4 * Name of pseudoatom representing the triplet\n* 5 * Ordinal number of assignment (1 is assigned first)\n* 6 * 'yes' if assignment state is swapped with respect to restraint file\n* 7 * Percentage of models in which the assignment with the lowest\n overall energy is favored\n* 8 * Percentage of difference between lowest and highest overall energy\n with respect to the highest overall energy\n* 9 * Difference between lowest and highest overall energy ensemble averaged\n* 10 * Energy of the highest overall energy state (Ang.**2) ensemble averaged\n* 11 * Energy of the lowest overall energy state (Ang.**2) ensemble averaged\n* 12 * Number of restraints involved with the triplet. 
The highest ranking\n triplet on this number, is assigned first (optional)\n* 13 * Number of restraints involved with the triplet that are ambiguous\n besides the ambiguity from this triplet\n* 14 * 'yes' if restraints included in this triplet are deassigned\n* 15 * Maximum unaveraged violation before deassignment (Ang.)\n* 16 * Number of violated restraints above threshold for a single model\n before deassignment (given by Single_mdl_crit_count)\n* 17 * Number of violated restraints above threshold for a multiple models\n before deassignment (given by Multi_mdl_crit_count)\n;\n\n\n loop_\n _Stereo_assign.Chain_ID\n _Stereo_assign.Comp_index_ID\n _Stereo_assign.Comp_ID\n _Stereo_assign.Pseudo_Atom_ID\n _Stereo_assign.Num\n _Stereo_assign.Swapped\n _Stereo_assign.Models_favoring_pct\n _Stereo_assign.Energy_difference_pct\n _Stereo_assign.Energy_difference\n _Stereo_assign.Energy_high_state\n _Stereo_assign.Energy_low_state\n _Stereo_assign.Constraint_count\n \"\"\"\n # if not self.compareWithWattos:\n explanations += \" _Stereo_assign.Constraint_ambi_count\\n\"\n # end if\n explanations += \"\"\" _Stereo_assign.Deassigned\n _Stereo_assign.Violation_max\n _Stereo_assign.Single_mdl_crit_count\n _Stereo_assign.Multi_mdl_crit_count\n\n\"\"\"\n\n footer = \"\"\" stop_\n\n save_\n\n \"\"\"\n\n\n star_text = header + explanations + meat + footer\n\n starFile = File()\n if starFile.read(text=star_text):\n print \"Error: reading STAR text by STAR api.\"\n return True\n if starFile.check_integrity():\n print \"Error: STAR text failed integrity check.\"\n return True\n starFile.filename = outputFileName\n if starFile.write():\n print \"Error: writing file %\" % outputFileName\n return True\n if not os.path.exists(outputFileName):\n print \"Error: failed to find STAR file %s\" % outputFileName\n return True\n# print \"Written meta data to STAR file: %s\" % outputFileName # already printed by write()\n \n \n self.storeToAppData( star_text )\n # end def", "def trim(self, ratio=10000):\n trimmed, total = 0, 0\n for sources in self.sources():\n for s in (self.tp_by_source_and_text[sources],\n self.fp_by_source_and_text[sources],\n self.fn_by_source_and_text[sources],\n self.overlap_by_source_and_text[sources]):\n try:\n max_count = s.most_common(1)[0][1]\n except IndexError:\n continue\n for k, v in list(s.items()):\n if v * ratio < max_count:\n trimmed += 1\n del s[k]\n total += 1\n print(f'trimmed {trimmed}/{total} ({trimmed/total:.1%})',\n file=sys.stderr, flush=True)", "def do_semiglobal_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score\n scoring[i].append(max([xgap, ygap, match]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for j in range(len(scoring[-1])): # find max low road\n if scoring[-1][j] >= max_score:\n max_i, max_j, max_score = -1, j, scoring[-1][j]\n\n for i in range(len(scoring)): # find max high road (priority)\n if scoring[i][-1] >= max_score:\n max_i, max_j, max_score = i, -1, scoring[i][-1]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, 
penalty, matrix, max_i, max_j, semi=True\n )\n\n # add the endgaps for seq1\n if max_i == -1 and max_j != len(scoring[-1]):\n for j in range(max_j + 1, len(scoring[-1])):\n alignment[0][0] += '-'\n alignment[1][0] += ' '\n alignment[2][0] += seq2[j]\n\n # add the endgaps for seq2\n if max_j == -1 and max_i != len(scoring):\n for i in range(max_i + 1, len(scoring)):\n alignment[0][0] += seq1[i]\n alignment[1][0] += ' '\n alignment[2][0] += '-'\n\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def filter_dicom_info_by_best_spacing(info_df):\n output_indices = (\n info_df\n .groupby('AccessionNumber')\n .agg({'SpacingZ': 'idxmin'})\n )\n info_df = info_df.loc[output_indices.loc[:, 'SpacingZ'], :]\n return info_df", "def _clean_hits(reads):\n new_reads = defaultdict(realign)\n for r in reads:\n world = {}\n sc = 0\n for p in reads[r].precursors:\n world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence))\n if sc < world[p]:\n sc = world[p]\n new_reads[r] = reads[r]\n for p in world:\n logger.debug(\"score %s %s %s\" % (r, p, world[p]))\n if sc != world[p]:\n logger.debug(\"remove %s %s %s\" % (r, p, world[p]))\n new_reads[r].remove_precursor(p)\n\n return new_reads", "def match_align(self, match_only=True):\n\n print('Running match_align')\n\n # Note about alignment parameters: Used to have a small -0.1\n # penalty for gap creation & extension. Got rid of this in favor\n # of getting better alignments, but the downside is that for\n # cases where you want to know which residues are mutants of\n # each other, you don't get that info anymore.\n alignments = pairwise2.align.globalxs(self.target_sequence,\\\n self.mobile_sequence, 0, 0)\n\n #for alignment in alignments:\n if not alignments:\n return False\n print(alignments)\n best_rmsd = 9999\n i = 0\n for i in range(0, len(alignments)):\n bb_rmsd = self.align_sequences(alignments, i,\n match_only=match_only)\n if bb_rmsd < best_rmsd:\n best_rmsd = bb_rmsd\n best_i = i\n self.bb_rmsd = self.align_sequences(alignments, best_i,\n match_only=match_only)\n\n return True", "def extract_valid_gt_data(all_data, remove_ofv=False):\n distractor_classes = [2, 7, 8, 12]\n valid_classes = [1]\n original = all_data.shape[0]\n\n # remove classes in other classes, pedestrain and distractors\n # left for furthur usages\n selected = np.array([\n i for i in range(all_data.shape[0])\n if all_data[i, 7] in valid_classes + distractor_classes])\n all_data = all_data[selected, :]\n\n # remove boxes whose centers is out of view\n # Cause this tool is not only set for MOT, thus resolution is not assumed\n # provided. In MOT, the maximum width andd height should be taken into\n # consirderation\n\n # PS: As stated by author of MOT benchmark, it would be better the tracker\n # could figure out the out of view pedestrain like human does. Thus no\n # filtering\n if remove_ofv: # remove out of view for ground truth\n selected = np.array([i for i in range(all_data.shape[0])\n if (all_data[i, 2] + all_data[i, 4]) / 2 >= 0 and\n (all_data[i, 3] + all_data[i, 5]) / 2 >= 0])\n\n # not consider right and bottom out of range here. 
Anyway ofv is not\n # removed in MOT2016\n # selected = np.array([i for i in xrange(all_data.shape[0])\n # if (all_data[i, 2] + all_data[i, 4]) / 2 != 0\n # ])\n all_data = all_data[selected, :]\n\n # remove non-human classes from ground truth,\n # and return distractor identities\n cond = np.array(\n [i in valid_classes + distractor_classes for i in all_data[:, 7]])\n selected = np.where(cond == True)[0]\n all_data = all_data[selected, :] # not necessary?\n\n print('[GT PREPROCESSING]: Removing non-people classes, remaining '\n '{}/{} boxes'.format(all_data.shape[0], original))\n cond = np.array([i in distractor_classes for i in all_data[:, 7]])\n selected = np.where(cond == True)[0]\n\n all_dsitractor_ids = all_data[selected, 1]\n unique_distractor_ids = np.unique(all_dsitractor_ids)\n return all_data, unique_distractor_ids", "def __cullArchive(self):\n if len(self.genomes) <= self.max_size:\n return\n\n n_delete = len(self.genomes) - self.max_size\n indices = sorted([(lf, i) for i,lf in enumerate(self.local_fitnesses)])\n to_delete = set( i for _, i in indices[:n_delete] )\n self.genomes = [g for i,g in enumerate(self.genomes) if i not in to_delete]\n self.fitnesses = [f for i,f in enumerate(self.fitnesses) if i not in to_delete]\n self.features = [f for i,f in enumerate(self.features) if i not in to_delete]\n self.local_fitnesses = [f for i,f in enumerate(self.local_fitnesses) if i not in to_delete]\n\n assert len(self.genomes) <= self.max_size\n assert len(self.genomes) == len(self.fitnesses)\n assert len(self.genomes) == len(self.features)\n assert len(self.genomes) == len(self.local_fitnesses)", "def trimEndGaps(aligned_consensus, aligned_mutant):\n\tn_leading_gaps = 0\n\tn_trailing_gaps = 0\n\twhile aligned_consensus[0] == \"-\":\n\t\tn_leading_gaps += 1\n\t\taligned_consensus = aligned_consensus[1:]\n\twhile aligned_consensus[-1] == \"-\":\n\t\tn_trailing_gaps += 1\n\t\taligned_consensus = aligned_consensus[:-1]\n\ttrimmed_consensus = aligned_consensus\n\ttrimmed_mutant = aligned_mutant[n_leading_gaps:len(aligned_mutant)-n_trailing_gaps]\n\treturn trimmed_consensus, trimmed_mutant" ]
[ "0.6130298", "0.59186476", "0.58583313", "0.57613075", "0.55340576", "0.54292566", "0.53562945", "0.52083033", "0.52064", "0.51962346", "0.5181664", "0.5160797", "0.51300323", "0.51119745", "0.50493014", "0.50335264", "0.4981057", "0.49788785", "0.4958578", "0.49504927", "0.49388942", "0.49156594", "0.48846948", "0.48767528", "0.4876117", "0.48705712", "0.48561272", "0.4818243", "0.48122495", "0.47921988" ]
0.8033812
0
Starting from the alignment in ``alignment_info``, look at neighboring alignments iteratively for the best one, according to Model 4. Note that Model 4 scoring is used instead of Model 5 because the latter is too expensive to compute. There is no guarantee that the best alignment in the alignment space will be found, because the algorithm might be stuck in a local maximum.
def hillclimb(self, alignment_info, j_pegged=None):
    alignment = alignment_info  # alias with shorter name
    max_probability = IBMModel4.model4_prob_t_a_given_s(alignment, self)

    while True:
        old_alignment = alignment
        for neighbor_alignment in self.neighboring(alignment, j_pegged):
            neighbor_probability = IBMModel4.model4_prob_t_a_given_s(
                neighbor_alignment, self
            )

            if neighbor_probability > max_probability:
                alignment = neighbor_alignment
                max_probability = neighbor_probability

        if alignment == old_alignment:
            # Until there are no better alignments
            break

    alignment.score = max_probability
    return alignment
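The loop is a plain greedy hill-climb: rescore every neighbor of the current alignment, move to the best improvement, and stop once no neighbor beats the current state, i.e., at a local maximum. A generic sketch of the same search pattern, independent of the NLTK classes; the score function and neighbor generator below are placeholders:

def hillclimb_generic(start, score, neighbors):
    # Move to the best-scoring neighbor until no neighbor improves on the
    # current state (a local maximum of the score function).
    current = start
    best = score(current)
    while True:
        previous = current
        for candidate in neighbors(current):
            candidate_score = score(candidate)
            if candidate_score > best:
                current, best = candidate, candidate_score
        if current == previous:
            break
    return current, best

# Toy usage: maximize -(x - 3)^2 over the integers by stepping +/- 1.
print(hillclimb_generic(0, lambda x: -(x - 3) ** 2, lambda x: [x - 1, x + 1]))  # (3, 0)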
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find(self) -> bool:\n alignments = []\n for sw_idx in range(len(self.sw)):\n for nu_idx in range(len(self.nu)):\n alignments.append(Alignment(self.nu, self.sw, nu_idx, sw_idx, self.orig_nu))\n alignment = max(alignments, key=lambda align: align.score)\n if alignment.score > 0:\n self.alignment = alignment\n return True\n return False", "def getAlignment(self):\n # Code to complete - generated by traceback through matrix to generate aligned pairs\n \n # find the position of the max_value\n max_value = self.getMaxAlignmentScore()\n max_pos = tuple(numpy.argwhere(self.matrix == max_value)[-1])\n x_pos = max_pos[0]; y_pos = max_pos[1]\n\n # array that holds the tuples\n path = list()\n\n # now find the path to the 0\n \n while self.matrix[x_pos][y_pos] != 0:\n \n # if diagonal is a match take that as priority\n if self.string1[x_pos - 1] == self.string2[y_pos - 1]:\n path.append((x_pos - 1, y_pos - 1))\n x_pos -=1; y_pos -= 1\n continue\n\n # finds the best horizontal alignment\n bestX = 0; bestY = y_pos - 1\n for i in range(x_pos - 1):\n if self.matrix[i][y_pos - 1] >= self.matrix[bestX][bestY]:\n bestX = i\n \n # finds best vertical alignment\n bestX_vertical = x_pos - 1; bestY_vertical = 0\n for i in range(y_pos - 1):\n if self.matrix[x_pos - 1][i] >= self.matrix[bestX_vertical][bestY_vertical]:\n bestY_vertical = i\n \n # if diagonal not satisfied, see which is better\n # the horizontal of vertical alignment.\n if self.matrix[bestX][bestY] < self.matrix[bestX_vertical][bestY_vertical]:\n path.append((bestX_vertical, bestY_vertical))\n x_pos = bestX_vertical; y_pos = bestY_vertical\n else:\n path.append((bestX, bestY))\n x_pos = bestX; y_pos = bestY\n\n return path[::-1] # reversed because we want origin to highest element.", "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n best_score = 0\n len_m, len_n = len(seq_x), len(seq_y)\n best_i = 0\n best_j = 0\n x_ret, y_ret = '', ''\n for idx_i in range(len_m+1):\n for idx_j in range(len_n+1):\n if alignment_matrix[idx_i][idx_j] > best_score:\n best_score = alignment_matrix[idx_i][idx_j]\n best_i = idx_i\n best_j = idx_j\n idx_i = best_i\n idx_j = best_j\n while idx_i != 0 and idx_j != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n if alignment_matrix[idx_i][idx_j] == (alignment_matrix[idx_i-1][idx_j-1] +\n scoring_matrix[seq_x[idx_i-1]][seq_y[idx_j-1]]):\n # score from diagnoal cell\n x_ret = (seq_x[idx_i-1]) + x_ret\n y_ret = (seq_y[idx_j-1]) + y_ret\n idx_i -= 1\n idx_j -= 1\n elif alignment_matrix[idx_i][idx_j] == (alignment_matrix[idx_i-1][idx_j] +\n scoring_matrix[seq_x[idx_i-1]]['-']):\n # score from above cell\n x_ret = (seq_x[idx_i - 1]) + x_ret\n y_ret = ('-') + y_ret\n idx_i -= 1\n else:\n # score from left cell\n x_ret = ('-') + x_ret\n y_ret = (seq_y[idx_j - 1]) + y_ret\n idx_j -= 1\n while idx_i != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n\n # idx_j = 0, move upward along first column\n x_ret = (seq_x[idx_i - 1]) + x_ret\n y_ret = ('-') + y_ret\n idx_i -= 1\n while idx_j != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n\n # idx_i = 0, move left along first row\n x_ret = ('-') + x_ret\n y_ret = (seq_y[idx_j - 1]) + y_ret\n idx_j -= 1\n return (best_score, x_ret, y_ret)", "def match_align(self, match_only=True):\n\n print('Running match_align')\n\n # Note about alignment parameters: Used to have a small -0.1\n # penalty for gap creation & extension. 
Got rid of this in favor\n # of getting better alignments, but the downside is that for\n # cases where you want to know which residues are mutants of\n # each other, you don't get that info anymore.\n alignments = pairwise2.align.globalxs(self.target_sequence,\\\n self.mobile_sequence, 0, 0)\n\n #for alignment in alignments:\n if not alignments:\n return False\n print(alignments)\n best_rmsd = 9999\n i = 0\n for i in range(0, len(alignments)):\n bb_rmsd = self.align_sequences(alignments, i,\n match_only=match_only)\n if bb_rmsd < best_rmsd:\n best_rmsd = bb_rmsd\n best_i = i\n self.bb_rmsd = self.align_sequences(alignments, best_i,\n match_only=match_only)\n\n return True", "def find_matching_seqs_from_alignment(sequences, ref_sequence):\n\n # if the first sequence (gaps removed) in MSA matches with reference,\n # return this sequence.\n first_seq_in_alignment = sequences[0] \n #first_seq_in_alignment_gaps_removed = first_seq_in_alignment.replace('-','')\n first_seq_in_alignment_gaps_removed = find_and_replace(first_seq_in_alignment, '-','')\n if first_seq_in_alignment_gaps_removed == ref_sequence:\n print('\\n\\tFirst sequence in alignment (gaps removed) matches reference,'\n '\\n\\tSkipping regorous search for matching sequence'\n )\n first_seq = list()\n first_seq.append(first_seq_in_alignment)\n return first_seq\n pairwise_scores = []\n for seq_indx, seq in enumerate(sequences):\n #seq_gaps_removed = seq.replace('-','')\n seq_gaps_removed = find_and_replace(seq, '-', '')\n print(seqs_gaps_removed)\n\n score = align_pairs_local(\n ref_sequence,\n seq_gaps_removed,\n score_only = True,\n )\n score_at_indx = (seq_indx, score)\n pairwise_scores.append(score_at_indx)\n\n seq_indx, max_score = max(pairwise_scores, key=lambda x: x[1])\n matching_seqs_indx = [\n indx for indx, score in pairwise_scores if score == max_score\n ]\n\n best_matching_seqs = [\n sequences[indx] for indx in matching_seqs_indx\n ]\n num_matching_seqs = len(best_matching_seqs)\n if num_matching_seqs > 1 :\n print('\\n\\tFound %d sequences in MSA that match the reference'\n '\\n\\tThe first sequence is taken as matching'% num_matching_seqs\n )\n return best_matching_seqs", "def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n #score = max([alignment_matrix[row][col] for row in range(len_x + 1) for col in range(len_y+1)])\n\n max_score = -1\n max_positions = []\n for row in range(len(seq_x)+1):\n for col in range(len(seq_y)+1):\n if alignment_matrix[row][col] == max_score:\n max_positions.append((row,col))\n if alignment_matrix[row][col] > max_score:\n max_score = alignment_matrix[row][col]\n max_positions = [(row, col)]\n max_row, max_col = random.choice(max_positions)\n\n #print max_score, max_row, max_col\n\n len_x = max_row\n len_y = max_col\n\n while alignment_matrix[len_x][len_y] > 0:\n #print len_x, len_y\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n #while len_x > 0:\n # align_x = seq_x[len_x-1] + align_x\n # align_y = \"-\" + align_y\n # len_x -= 1\n\n 
#while len_y > 0:\n # align_x = \"-\" + align_x\n # align_y = seq_y[len_y-1] + align_y\n # len_y -= 1\n\n return (max_score, align_x, align_y)", "def do_local_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score (including 0)\n scoring[i].append(max([xgap, ygap, match, 0]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for i in range(len(scoring)):\n for j in range(len(scoring[i])):\n if scoring[i][j] > max_score:\n max_i, max_j, max_score = i, j, scoring[i][j]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, penalty, matrix, max_i, max_j, local=True\n )\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def do_global_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = global_setup(len(seq1), len(seq2), penalty)\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max value of them all\n scoring[i].append(max([xgap, ygap, match]))\n\n # Perform traceback\n alignment = traceback(scoring, seq1, seq2, penalty, matrix)\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def do_semiglobal_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score\n scoring[i].append(max([xgap, ygap, match]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for j in range(len(scoring[-1])): # find max low road\n if scoring[-1][j] >= max_score:\n max_i, max_j, max_score = -1, j, scoring[-1][j]\n\n for i in range(len(scoring)): # find max high road (priority)\n if scoring[i][-1] >= max_score:\n max_i, max_j, max_score = i, -1, scoring[i][-1]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, penalty, matrix, max_i, max_j, semi=True\n )\n\n # add the endgaps for seq1\n if max_i == -1 and max_j != len(scoring[-1]):\n for j in range(max_j + 1, len(scoring[-1])):\n alignment[0][0] += '-'\n alignment[1][0] += ' '\n alignment[2][0] += seq2[j]\n\n # add the endgaps for seq2\n if max_j == -1 and max_i != len(scoring):\n for i in range(max_i + 1, len(scoring)):\n alignment[0][0] += seq1[i]\n 
alignment[1][0] += ' '\n alignment[2][0] += '-'\n\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of variables\n x_pos = -1\n y_pos = -1\n result_seq_x = ''\n result_seq_y = ''\n score = 0\n\n #determine start position in alignment_matrix as position with maximum value \n for row in range(len(seq_x) + 1):\n for col in range(len(seq_y) + 1):\n if alignment_matrix[row][col] > score:\n score = alignment_matrix[row][col]\n x_pos = row\n y_pos = col\n\n #start in start position and go upwards till we reach first entry with value 0\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 and y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n if current_value == 0:\n break\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)", "def global_alignment(first_seq, second_seq, match_penalty_value, mismatch_penalty_value, gap_penalty_value):\n alignment_matrix = initiate_matrix(first_seq, second_seq, gap_penalty_value)\n path_matrix = np.zeros((alignment_matrix.shape[0], alignment_matrix.shape[1], 3), dtype=str)\n scores = []\n \"\"\" Second step is to apply get the max score method, then assign it to the current cell. \"\"\"\n for i in range(1, alignment_matrix.shape[0]):\n for j in range(1, alignment_matrix.shape[1]):\n row_score = alignment_matrix[i, j - 1] + gap_penalty_value\n column_score = alignment_matrix[i - 1, j] + gap_penalty_value\n if second_seq[i - 1] == first_seq[j - 1]:\n diagonal_score = alignment_matrix[i - 1, j - 1] + match_penalty_value\n else:\n diagonal_score = alignment_matrix[i - 1, j - 1] + mismatch_penalty_value\n scores.append(row_score)\n scores.append(column_score)\n scores.append(diagonal_score)\n alignment_matrix[i, j], ex_cell = get_max_score(scores)\n scores.clear()\n for I in range(ex_cell.size):\n if ex_cell[0][I] == 0:\n path_matrix[i, j, 1] = \"S\"\n elif ex_cell[0][I] == 1:\n path_matrix[i, j, 1] = \"F\"\n elif ex_cell[0][I] == 2:\n path_matrix[i, j, 1] = \"D\"\n\n max_score = alignment_matrix[i, j]\n \"\"\" Third step is to trace back.\"\"\"\n f, s = trace_back(path_matrix, first_seq, second_seq, match_penalty_value, mismatch_penalty_value,\n gap_penalty_value)\n \"\"\"Last step is to check the max score with the aligned sequences score. \"\"\"\n new_s, check = check_alignment_score(f, s, max_score, match_penalty_value, mismatch_penalty_value,\n gap_penalty_value)\n return f, s, new_s, check", "def find_best_align(s1, s2, l1, l2):\n\n my_best_align = None\n my_best_score = -1\n\n for i in range(l1): # Note that you just take the last alignment with the highest score\n z = calculate_score(s1, s2, l1, l2, i)\n if z > my_best_score:\n my_best_align = \".\" * i + s2 # prints number of '.' 
to get to startpoint (which is i here)\n my_best_score = z\n\n # Formatted output\n print(my_best_align)\n print(s1)\n print(\"Best score:\", my_best_score)\n\n return my_best_align, my_best_score", "def get_alignment(a: np.ndarray, max_alignment: int = 128) -> int:\n # Check max_alignment\n if bin(max_alignment).count('1') != 1:\n raise ValueError(\"'max_alignment' must be a power of 2.\")\n\n # Get largest base\n b = int(np.log2(max_alignment))\n\n # Get best alignment\n return next(2**x for x in range(b, 0, -1) if (a.ctypes.data % 2**x) == 0)", "def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n num_rows = len(seq_x)\n num_cols = len(seq_y)\n x_prime = ''\n y_prime = ''\n\n while num_rows != 0 and num_cols != 0:\n if alignment_matrix[num_rows][num_cols] == alignment_matrix[num_rows-1][num_cols-1] + scoring_matrix[seq_x[num_rows-1]][seq_y[num_cols-1]]:\n x_prime = seq_x[num_rows-1] + x_prime\n y_prime = seq_y[num_cols-1] + y_prime\n num_rows -= 1\n num_cols -= 1\n else:\n if alignment_matrix[num_rows][num_cols] == alignment_matrix[num_rows-1][num_cols] + scoring_matrix[seq_x[num_rows-1]]['-']:\n x_prime = seq_x[num_rows-1] + x_prime\n y_prime = '-' + y_prime\n num_rows -= 1\n else:\n x_prime = '-' + x_prime\n y_prime = seq_y[num_cols-1] + y_prime\n num_cols -= 1\n \n while num_rows != 0:\n x_prime = seq_x[num_rows-1] + x_prime\n y_prime = '-' + y_prime\n num_rows -= 1\n\n while num_cols != 0:\n x_prime = '-' + x_prime\n y_prime = seq_y[num_cols-1] + y_prime\n num_cols -= 1\n\n # compute score of alignment\n score = 0\n for position in range(len(x_prime)):\n score += scoring_matrix[x_prime[position]][y_prime[position]]\n\n return (score, x_prime, y_prime)", "def local_align(x, y, gap, match, mismatch):\n # create a zero-filled matrix\n A = make_matrix(len(x) + 1, len(y) + 1)\n # make a copy of A to keep the path\n path = make_matrix(len(x) + 1, len(y) + 1)\n print(len(A[0]))\n print(len(A))\n # print(A[12][11])\n best = 0\n optloc = (0, 0)\n # fill in A in the right order\n for i in range(1, len(y)):\n for j in range(1, len(x)):\n print(\"Test\")\n # get the values of the neighbouring cells\n left = A[i][j - 1] + gap\n up = A[i - 1][j] + gap\n diagonally = A[i - 1][j - 1] + (match if x[i] == y[j] else mismatch)\n\n maxCell = max(left, up, diagonally, 0)\n\n # the local alignment recurrance rule:\n A[i][j] = maxCell\n\n # track the cell with the largest score\n if A[i][j] >= best:\n best = A[i][j]\n optloc = (i, j)\n\n # track the path in a matrix\n # 0 is left\n # 1 is up\n # 2 is diagonally\n # 3 is zero value\n if left == maxCell:\n path[i][j] = 0\n elif up == maxCell:\n path[i][j] = 1\n elif diagonally == maxCell:\n path[i][j] = 2\n else:\n path[i][j] = 3\n\n # track where we got\n # return the opt score and the best location\n return best, optloc, path, A", "def local_align(x, y, score=ScoreParam(AA, GG, AT, AC, AG, GC, gapPen)):\n\n # create a zero-filled matrix\n A = make_matrix(len(x) + 1, len(y) + 1)\n\n best = 0\n optloc = (0,0)\n\n alignOne = []\n alignTwo = []\n\n #trace = []\n\n # fill in A in the right order\n for i in xrange(1, len(x)+1):\n #iterTrace = []\n for j in xrange(1, len(y)+1):\n # the local alignment recurrance rule:\n A[i][j] = max(\n A[i][j-1] + score.gap,\n A[i-1][j] + score.gap,\n A[i-1][j-1] + score.matchchar(x[i-1], y[j-1]),\n #0\n )\n\n #Find traceback\n '''if A[i][j] == A[i][j-1] + score.gap:\n iterTrace.append((i, j-1))\n elif A[i][j] == A[i-1][j] + score.gap:\n iterTrace.append((i-1, j))\n elif A[i][j] == A[i-1][j-1] + 
score.matchchar(x[i-1], y[j-1]):\n iterTrace.append((i-1, j-1))'''\n\n # track the cell with the largest score\n if A[i][j] >= best:\n best = A[i][j]\n optloc = (i,j)\n alignOne.append(x[i-1])\n alignTwo.append(y[j-1])\n\n #trace.append(iterTrace)\n alOne = ''.join(alignOne)\n alTwo = ''.join(alignTwo)\n\n print \"Scoring:\", str(score)\n print \"A matrix =\"\n #print_matrix(x, y, A)\n print \"Optimal Score =\", best\n print \"Max location in matrix =\", optloc\n #for i in range(0, len(trace)):\n #print(trace[i])\n # return the opt score and the best location\n return best, optloc, alOne, alTwo", "def prob_t_a_given_s(self, alignment_info):\n ...", "def compute_global_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of start position as bottom-right corner of matrix\n x_pos = len(seq_x)\n y_pos = len(seq_y)\n\n #initialization of variables\n result_seq_x = ''\n result_seq_y = ''\n score = alignment_matrix[x_pos][y_pos]\n\n #start in bottom right corner of matrix and go upwards till we reach left or upper edge\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 or y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]] and x_pos > 0 and y_pos > 0:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)", "def greedy_alignment(embed1, embed2, top_k, nums_threads, metric, normalize, csls_k, accurate):\n t = time.time()\n sim_mat = sim(embed1, embed2, metric=metric, normalize=normalize, csls_k=csls_k)\n num = sim_mat.shape[0]\n if nums_threads > 1:\n hits = [0] * len(top_k)\n mr, mrr = 0, 0\n alignment_rest = set()\n rests = list()\n search_tasks = task_divide(np.array(range(num)), nums_threads)\n pool = multiprocessing.Pool(processes=len(search_tasks))\n for task in search_tasks:\n mat = sim_mat[task, :]\n rests.append(pool.apply_async(calculate_rank, (task, mat, top_k, accurate, num)))\n pool.close()\n pool.join()\n for rest in rests:\n sub_mr, sub_mrr, sub_hits, sub_hits1_rest = rest.get()\n mr += sub_mr\n mrr += sub_mrr\n hits += np.array(sub_hits)\n alignment_rest |= sub_hits1_rest\n else:\n mr, mrr, hits, alignment_rest = calculate_rank(list(range(num)), sim_mat, top_k, accurate, num)\n assert len(alignment_rest) == num\n hits = np.array(hits) / num * 100\n for i in range(len(hits)):\n hits[i] = round(hits[i], 3)\n cost = time.time() - t\n if accurate:\n if csls_k > 0:\n print(\"accurate results with csls: csls={}, hits@{} = {}%, mr = {:.3f}, mrr = {:.6f}, time = {:.3f} s \".\n format(csls_k, top_k, hits, mr, mrr, cost))\n else:\n print(\"accurate results: hits@{} = {}%, mr = {:.3f}, mrr = {:.6f}, time = {:.3f} s \".\n format(top_k, hits, mr, mrr, cost))\n else:\n if csls_k > 0:\n print(\"quick results with csls: csls={}, hits@{} = {}%, time = {:.3f} s \".format(csls_k, top_k, hits, cost))\n else:\n print(\"quick results: hits@{} = {}%, time = {:.3f} s \".format(top_k, hits, cost))\n hits1 = hits[0]\n del sim_mat\n gc.collect()\n return alignment_rest, hits1, mr, mrr", "def 
local_aligner(s1, s2, gap_penalty=-1, gap_opening_penalty=-10, k=1, sub_alignments_num=1, edit_function=utils.sub_matrices_distance, matrix=MatrixInfo.pam120):\n\n alignments = []\n \n # Build the initial score matrix.\n [score, S, backtrack_matrix, i_max, j_max] = local_aligner_score(s1, s2, gap_penalty=gap_penalty, gap_opening_penalty=gap_opening_penalty, edit_function=edit_function, matrix=matrix)\n for n in range(sub_alignments_num):\n align_list_n = gb2.backtrack_sequence_rec(s1[:i_max], s2[:j_max], backtrack_matrix.iloc[:i_max+1, :j_max+1], k=k)\n \n # Add the alignment scores to each alignment\n for align_i in align_list_n:\n align_i.score = score\n # Add the alignments to the overall list of alignments\n alignments += align_list_n\n \n # Update the score matrix to get more subalignments.\n # Small optimization: done only if sub_alignments_num > 1\n if sub_alignments_num > 1:\n # Update the score matrix to get more subalignments.\n # Get the coordinates of one best matching\n coordinate_list = reconstruct_sequence(s1, s2, S, backtrack_matrix.iloc[:i_max+1, :j_max+1], gap_penalty, gap_opening_penalty, edit_function=edit_function, matrix=matrix)\n update_score_matrix(s1, s2, S, coordinate_list, backtrack_matrix, gap_penalty, gap_opening_penalty, edit_function=edit_function, matrix=matrix)\n\n # Find the new maximum value in the matrix.\n [i_max, j_max] = np.unravel_index(np.argmax(S), S.shape)\n score = S[i_max, j_max]\n if i_max == 0 and j_max == 0:\n break\n \n return alignments", "def main(argv):\n \n ### gets data from csv, sets variables\n seq1, seq2 = get_seqs('../data/seq.csv')\n \n \n # Assign the longer sequence to s1, and the shorter to s2\n l1, l2 = len(seq1), len(seq2)\n if l1 >= l2:\n s1, s2 = ((l2 - 1) * \".\" + seq1 + (l2 - 1) * \".\"), seq2\n #puts l2-1 \".\"s both sides of l1, allows alignment of all overlap combos\n else:\n s1, s2 = ((l1 - 1) * \".\" + seq2 + (l1 - 1) * \".\"), seq1\n l1, l2 = l2, l1 \n\n # writes alignment(s) with highest score into output file\n my_best_score = -1 #so 0 beats best score\n for i in range(l1 + l2 -1):\n score, matched, shift, end_shift = calculate_score(s1, s2, l1, l2, i)\n #assigns returns from calc_score function to these variables\n if score > my_best_score:\n my_best_score = score\n statement = \"This alignment occurs when the smaller strand (\" + \\\n str(l2) + \"nt in length) attaches from base \" + str(i - l2 + 2) + \\\n \" of the larger strand, with the highest score of \" + str(score) + \\\n \":\\n\"\n #statement explaining the alignment in detail\n best_comparison_highSP = (shift + matched + (l2 - 1) * \".\" + \"\\n\")\n best_comparison_lowSP = (shift + matched + end_shift + \"\\n\")\n best_s2, best_s1 = (shift + s2 + end_shift + \"\\n\"), (s1 + \"\\n\\n\\n\")\n #formats the matching, s1 and s2 lines to line-up neatly\n if i < l1 - 1:\n best_alignment = (str(statement) + str(best_comparison_lowSP) \\\n + str(best_s2) + str(best_s1))\n else:\n best_alignment = (str(statement) + str(best_comparison_highSP) \\\n + str(best_s2) + str(best_s1))\n # uses returned variables to write a statement about the alignment \n # giving its score and startpoint, and assigns 3 lines of alignment \n # (s1, s2 and matching bases) to a variable each for later printing\n f = open('../results/seqs_align.txt', 'w')\n f.write(best_alignment)\n f.close()\n print(\"Done!\")\n return None", "def prune(self, alignment_infos):\n alignments = []\n best_score = 0\n\n for alignment_info in alignment_infos:\n score = 
IBMModel4.model4_prob_t_a_given_s(alignment_info, self)\n best_score = max(score, best_score)\n alignments.append((alignment_info, score))\n\n threshold = IBMModel5.MIN_SCORE_FACTOR * best_score\n alignments = [a[0] for a in alignments if a[1] > threshold]\n return set(alignments)", "def compute_alignment_matrix(seq_x,seq_y,scoring_matrix,global_flag):\n \n rows = len(seq_x)\n cols = len(seq_y)\n #if sequences are empty return [[0]]\n if rows == 0 and cols == 0:\n return [[0]]\n \n #initialize of alignment matrix and other variables\n alignment_matrix = [[ 0 for col in range(cols+1)] for row in range(rows+1)]\n value = 0\n \n for row in range(rows+1):\n for col in range(cols+1):\n #for every entry its value is computed \n if row == 0 and col == 0:\n #entry [0,0]\n alignment_matrix[row][col] = 0\n elif row == 0:\n #entry [0,j] is computed based on values [0,j-1] and score of (\"-\" and seq_y[j]) \n value = alignment_matrix[row][col-1] + scoring_matrix[\"-\"][seq_y[col-1]]\n elif col == 0:\n #entry [i,0] is computed based on values [i-1,0] and score of (seq_x[i] and \"-\")\n value = alignment_matrix[row-1][col] + scoring_matrix[seq_x[row-1]][\"-\"]\n else:\n #entry [i,j] is computed based of [i-1,j-1],[i,j-1],[i-1,j] as maximum of values\n val1 = alignment_matrix[row-1][col-1] + scoring_matrix[seq_x[row-1]][seq_y[col-1]]\n val2 = alignment_matrix[row-1][col] + scoring_matrix[seq_x[row-1]][\"-\"]\n val3 = alignment_matrix[row][col-1] + scoring_matrix[\"-\"][seq_y[col-1]]\n\n value = max(val1,val2,val3)\n \n if not global_flag:\n #for local alignment negative score is replaced with 0\n value = max(value,0)\n \n alignment_matrix[row][col] = value \n\n return alignment_matrix", "def compute_alignment_matrix(seq_x,seq_y,scoring_matrix,global_flag):\n m_len, n_len = len(seq_x), len(seq_y)\n s_alignment_matrix = [[0 for _ in range(n_len+1)] for _ in range(m_len+1)]\n # print s_alignment_matrix\n for i_idx in range(1, m_len+1):\n last_score = s_alignment_matrix[i_idx-1][0] + scoring_matrix[seq_x[i_idx-1]]['-']\n s_alignment_matrix[i_idx][0] = \\\n (global_flag) and last_score or max(0, last_score)\n\n for j_idx in range(1,n_len+1):\n last_score = s_alignment_matrix[0][j_idx-1] + scoring_matrix['-'][seq_y[j_idx-1]]\n s_alignment_matrix[0][j_idx] = (global_flag) and last_score or max(0, last_score)\n\n for i_idx in range(1, m_len+1):\n for j_idx in range(1, n_len+1):\n diag_score = s_alignment_matrix[i_idx-1][j_idx-1] + scoring_matrix[seq_x[i_idx-1]][seq_y[j_idx-1]]\n up_score = s_alignment_matrix[i_idx-1][j_idx] + scoring_matrix[seq_x[i_idx-1]]['-']\n left_score = s_alignment_matrix[i_idx][j_idx-1] + scoring_matrix['-'][seq_y[j_idx-1]]\n max_score = max(diag_score,up_score,left_score)\n s_alignment_matrix[i_idx][j_idx] = (global_flag) and max_score or max(0, max_score)\n return s_alignment_matrix", "def mapForwards(queryString):\n # Find seed matches, aka \"aligned kmers\"\n seeds = list(minimizerIndex.getMatches(queryString))\n \n # For each cluster of seeds\n for seedCluster in SeedCluster.clusterSeeds(list(seeds), l=config.l):\n \n # Get substring of query and target to align\n queryStringStart = max(0, seedCluster.minX - config.c) # Inclusive coordinate\n queryStringEnd = min(len(queryString), seedCluster.maxX + config.k + config.c) # Exclusive coordinate\n querySubstring = queryString[queryStringStart:queryStringEnd]\n \n targetStringStart = max(0, seedCluster.minY - config.c) # Inclusive coordinate\n targetStringEnd = min(len(targetString), seedCluster.maxY + config.k + config.c) # Exclusive 
coordinate\n targetSubstring = targetString[targetStringStart:targetStringEnd]\n \n print( \"target_aligning\", targetStringStart, targetStringEnd, targetSubstring )\n print( \"query_aligning\", queryStringStart, queryStringEnd, querySubstring )\n \n # Align the genome and read substring\n alignment = SmithWaterman(targetSubstring, querySubstring, \n gapScore=config.gapScore, \n matchScore=config.matchScore,\n mismatchScore=config.mismatchScore)\n \n # Update best alignment if needed\n if bestAlignment[0] == None or alignment.getMaxAlignmentScore() > bestAlignment[0].getMaxAlignmentScore():\n bestAlignment[0] = alignment\n \n return bestAlignment", "def chainFn(alignedSegments, refSeq, readSeq, scoreFn=\\\n lambda alignedSegment, refSeq, readSeq : \\\n sum([ length for op, length in alignedSegment.cigar if op == 0]), maxGap=200):\n #Score function is number of aligned pairs\n def getStartAndEndCoordinates(alignedSegment):\n \"\"\"Gets the start and end coordinates in both the reference and query, using coordinates\n relative to the original read and reference equence\n \"\"\"\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq) \n \n alignedSegmentToScores = dict([ (aR, scoreFn(aR, refSeq, readSeq)) for aR in alignedSegments])\n alignedSegmentToCoordinates = dict([ (aR, getStartAndEndCoordinates(aR)) for \\\n aR in alignedSegments])\n alignedSegmentPointers = {}\n \n #Currently uses sloppy quadratic algorithm to find highest chain\n alignedSegments = sorted(alignedSegments, key=lambda aR : alignedSegmentToCoordinates[aR][0]) \n #Sort by reference coordinate\n for i in xrange(len(alignedSegments)):\n aR = alignedSegments[i]\n rStart, qStart, rEnd, qEnd = alignedSegmentToCoordinates[aR]\n score = alignedSegmentToScores[aR]\n for j in xrange(i): #Look at earlier alignments in list\n aR2 = alignedSegments[j]\n rStart2, qStart2, rEnd2, qEnd2 = alignedSegmentToCoordinates[aR2]\n assert rStart2 <= rStart\n if rStart > rEnd2 and qStart > qEnd2 and aR.is_reverse == aR2.is_reverse and \\\n rStart - rEnd2 + qStart - qEnd2 <= maxGap and \\\n score + alignedSegmentToScores[aR2] > alignedSegmentToScores[aR]: \n #Conditions for a chain\n alignedSegmentToScores[aR] = score + alignedSegmentToScores[aR2]\n alignedSegmentPointers[aR] = aR2\n \n #Now find highest scoring alignment \n aR = max(alignedSegments, key=lambda aR : alignedSegmentToScores[aR])\n \n #Construct chain of alignedSegments\n chain = [ aR ]\n while aR in alignedSegmentPointers:\n aR = alignedSegmentPointers[aR]\n chain.append(aR)\n chain.reverse()\n \n return chain", "def question2():\n \n # load sequences and scoring matrix\n score_matrix = read_scoring_matrix(PAM50_URL)\n human_seq = \"HSGVNQLGGVFVNGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATPEVVSKIAQYKRECPSIFAWEIRDRLLSEGVCTNDNIPSVSSINRVLRNLASEKQQ\"\n frfly_seq = \"HSGVNQLGGVFVGGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATAEVVSKISQYKRECPSIFAWEIRDRLLQENVCTNDNIPSVSSINRVLRNLAAQKEQQ\"\n consensus_pax = read_protein(CONSENSUS_PAX_URL)\n \n # compute human and fruitfly global alignment matrix with consensus pax\n human_align_matrix = student.compute_alignment_matrix(human_seq, consensus_pax, score_matrix, True)\n frfly_align_matrix = student.compute_alignment_matrix(frfly_seq, consensus_pax, score_matrix, True)\n \n # compute human and fruitfly global alignment sequences\n score_human, human_align, consensus_align = 
student.compute_global_alignment(human_seq, consensus_pax, \n score_matrix, human_align_matrix)\n score_fly, frfly_align, consensus_align_2 = student.compute_global_alignment(frfly_seq, consensus_pax,\n score_matrix, frfly_align_matrix)\n \n # compute percentages match for human and fruitfly\n human_count = 0.0\n for index in range(len(human_align)):\n if human_align[index] == consensus_align[index]:\n human_count += 1\n \n frfly_count = 0.0\n for index in range(len(frfly_align)):\n if frfly_align[index] == consensus_align_2[index]:\n frfly_count += 1\n \n print \"% Human: \" + str(human_count / len(human_align) * 100)\n print \"Hmn: \" + human_align\n print \"PAX: \" + consensus_align\n \n print \"\"\n \n print \"% FrFly: \" + str(frfly_count / len(frfly_align) * 100)\n print \"Fly: \" + frfly_align\n print \"PAX: \" + consensus_align_2", "def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n score = alignment_matrix[len_x][len_y]\n\n while len_x > 0 and len_y > 0:\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n while len_x > 0:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n\n while len_y > 0:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n return (score, align_x, align_y)", "def local_aligner_score(s1, s2, gap_penalty=-1, gap_opening_penalty=-10, edit_function=utils.sub_matrices_distance, matrix=MatrixInfo.pam120):\n\n n_row = len(s1) + 1\n n_col = len(s2) + 1\n # Creates a matrix where the partial scores are stored.\n S = np.zeros((n_row, n_col))\n # Creates a matrix (stored as DataFrame) where the optimal movements are\n # stored.\n backtrack_matrix = pd.DataFrame(\"\", index=np.arange(n_row), columns=np.arange(n_col))\n\n # Initialize the first column and row of the matrices.\n # In the local aligner, we stop when a 0 is encountered, which corresponds to an \"X\"\n for i in range(n_row):\n backtrack_matrix.set_value(i, 0, \"X\")\n\n for j in range(n_col):\n backtrack_matrix.set_value(0, j, \"X\")\n \n # small optimization: keep track of the maximum score encountered so far, and its indices.\n score_max = 0\n i_max = 0\n j_max = 0\n \n for i in range(1, n_row):\n for j in range(1, n_col):\n # Compute the possible movements, and then keeps the best.\n s1_gap = max([S[i - k, j] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, i+1)])\n s2_gap = max([S[i, j - k] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, j+1)])\n mut = S[i - 1, j - 1] + edit_function(s1[i - 1], s2[j - 1], matrix=matrix)\n # In the local aligner, don't accept negative scores!\n S[i, j] = max(s1_gap, s2_gap, mut, 0)\n\n if S[i, j] >= score_max:\n score_max = S[i, j]\n i_max = i\n j_max = j\n # Write in the matrix the movement that lead to that cell, as a string.\n # e.g. 
\"HV\" means that horizontal and vertical movements were the\n # best.\n # In local alignment, \"X\" means that 0 was the maximum value, and all the movements gave a negative score.\n # The backtracking will stop when an \"X\" is encountered.\n backtrack_matrix.set_value(i, j, \"\".join(check_argmax([s1_gap, s2_gap, mut, 0])))\n \n return [score_max, S, backtrack_matrix, i_max, j_max]", "def init_aligner(allow_target_gaps=False, allow_target_mismatches=False):\n a = Align.PairwiseAligner()\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n\n # Don't allow for gaps or mismatches with the target sequence\n if not allow_target_gaps:\n a.target_gap_score = -np.inf\n\n # Do not let matching items overwhelm determining where gaps should go\n if not allow_target_gaps:\n a.match = 10\n else:\n a.match = 200\n\n if allow_target_mismatches:\n a.mismatch = 200\n\n # Generally, prefer to extend gaps than to create them\n a.query_extend_gap_score = 99\n a.query_open_gap_score = 49\n\n # Set slight preference for open gaps on the edges, however, if present, strongly prefer single edge gaps\n a.query_end_open_gap_score = 50\n a.query_end_extend_gap_score = 100\n\n return a" ]
[ "0.6601188", "0.6398663", "0.6118952", "0.61056167", "0.60626775", "0.6023626", "0.60167754", "0.5994181", "0.59916455", "0.59730154", "0.5915752", "0.5887349", "0.5867155", "0.5839666", "0.57940066", "0.57413554", "0.5679159", "0.56668687", "0.5643491", "0.5640524", "0.56384325", "0.5601963", "0.5536254", "0.5481229", "0.54775566", "0.54751986", "0.544602", "0.54423404", "0.5423979", "0.54198027" ]
0.6587548
1
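The pruning idiom that recurs in the negatives above (for example the prune method shown near the end of the list) keeps only candidates scoring above a fixed fraction of the best score. A minimal standalone sketch of that idiom, assuming a plain score callable in place of IBMModel4.model4_prob_t_a_given_s and an illustrative threshold factor:

    MIN_SCORE_FACTOR = 0.2  # assumed value for illustration; the real factor is a constant on IBMModel5

    def prune_by_best_score(candidates, score):
        # Score each candidate once and remember the best score seen.
        scored = [(c, score(c)) for c in candidates]
        best = max((s for _, s in scored), default=0)
        # Keep only candidates strictly above a fraction of the best score.
        threshold = MIN_SCORE_FACTOR * best
        return {c for c, s in scored if s > threshold}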
Method that closes a connection
def closeConnection(connection): connection.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n self._connection.close()", "def close_connection(self, connection):\n pass", "def close_connection(self, connection ):\n pass", "def close_connection(self):\n\n self._connection.close()\n print(\"Closed connection....\")", "def close(self): \n self.connection.close()", "def close_connection(self):\n self.conn.close()", "def close_connection(self):\n self.connection.close()", "def close_connection(self):\n self.connection.close()", "def close_connection(self) -> None:\n self.conn.close()", "def closeConnection(self):\n print(\"closing connection...\")\n self.s.close()\n quit()", "def close_connection(self) -> None:\n self.connection.close()", "def close_connection(self):\n self._conn.close()", "def close(self): \n\t\tself.connection = None", "def close_connection(self):\r\n if self.conn:\r\n self.conn.close()", "def close_connection(self):\n\t\tself.session.close()", "def close_connection(self, connection):\n connection.close()", "async def close_connection(self):\n\t\t...", "def disconnect(conn):\n conn.close()", "def close_connection(self):\n if self.connection is not None:\n self.connection.close()", "def _close_connection(self):\n if self.connection:\n self.connection.destroy()\n self.connection = None", "def close_connection(self):\n self.session.close()", "def close(self):\n try:\n self.connection.Close()\n del self.connection\n except:\n pass", "def close(self):\n self.connection.close()\n print(\"Connection on port \" + str(self.port) + \" closed.\")", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()", "def close(self):\n self.connection.close()" ]
[ "0.80904424", "0.80650824", "0.8032496", "0.8001846", "0.7983716", "0.7976008", "0.7946594", "0.7946594", "0.7932376", "0.7921975", "0.78874135", "0.78827935", "0.7838825", "0.78090316", "0.7802317", "0.7776795", "0.76978225", "0.7654071", "0.7624984", "0.7618633", "0.761622", "0.7606794", "0.7605566", "0.75960726", "0.75960726", "0.75960726", "0.75960726", "0.75960726", "0.75960726", "0.75960726" ]
0.81997216
0
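A usage sketch for a connection-closing helper like the document above. The sqlite3 in-memory database is only an assumed stand-in for whatever DB-API driver the original code targets:

    import sqlite3

    def closeConnection(connection):
        connection.close()

    conn = sqlite3.connect(":memory:")
    try:
        conn.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY)")
    finally:
        closeConnection(conn)  # release the handle even if the query fails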
Method that reads the possible names from a file
def readNames(): namesRead = [] with open("Files/Names.txt", 'r', encoding='utf8') as f: for line in f: if line == "\n": continue namesRead.append(line.rstrip('\n').rstrip().lstrip()) f.close() return namesRead
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFile(fileName):\n\tf = open(fileName, 'r')\n\tnames = map(lambda s: s[1:-1], f.read().split(','))\n\tnames.sort()\n\treturn names", "def load_names(file_name: str) -> List[str]:\n full_path_name = os.path.join(os.getcwd(), 'names', file_name)\n with open(full_path_name, 'r') as file:\n names = file.read().rstrip('\\n').split(',')\n return sorted(names)", "def load_names(path):\n global taxid_names, scientific_names, synonyms, lowercase_names\n with open(path, 'r') as r:\n for line in r:\n (taxid, name, unique, kind) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 3)\n if kind == 'scientific name':\n taxid_names[taxid] = name\n scientific_names[name] = taxid\n else:\n synonyms[name] = taxid\n lowercase_names[name.lower()] = taxid", "def read_name_file(filename):\n with open(filename, 'r') as f:\n names = f.read()\n names = names.split('\\n')\n names = list(filter(None, names))\n return names", "def load_names(self):\n temp_names = []\n\n with open(self.NAMES_FILE) as f:\n for line in f:\n if len(line.strip()) > 0:\n temp_names.append(line.strip())\n\n return temp_names", "def name_list(file_name):\n \n li = open(file_name)\n list_of_names = []\n\n for name in li:\n (first,last) = str.split(name,' ')\n list_of_names.append(Name(first,last))\n return list_of_names", "def getFile(filename):\n f = open(filename)\n lines = f.readlines()\n f.close()\n names = []\n names.append(\"\")\n for name in lines[0].split(','):\n \n names.append(name[1:-1])\n names = sorted(names)\n return names", "def process_file(file_name):\n f = open(\"babynames/\" + file_name, 'r')\n reader = csv.reader(f)\n return list(reader)", "def load_names() -> list:\n with open(Path(\"bot/resources/pride/drag_queen_names.json\"), \"r\", encoding=\"utf8\") as f:\n return json.load(f)", "def readSurnames():\n surnamesRead = []\n with open(\"Files/Surnames.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n surnamesRead.append(line.rstrip('\\n').rstrip().lstrip())\n f.close()\n return surnamesRead", "def _read_names_file(self):\n filename = os.path.join(self.path, 'names.csv')\n lookup = collections.defaultdict(list)\n with open(filename) as f:\n reader = csv.reader(f)\n for line in reader:\n matches = set(line)\n for match in matches:\n lookup[match].append(matches)\n return lookup", "def read_drama_names(drama_file):\n with open(drama_file, 'rb') as f:\n drama_list = [d.decode('utf-8').strip() for d in f.readlines()]\n return drama_list", "def read_names(male_names_file_path, female_names_file_path):\n\n names = set()\n\n with open(male_names_file_path, \"r\") as f1:\n for name in f1:\n names.add(name.strip().lower())\n\n with open(female_names_file_path, \"r\") as f2:\n for name in f2:\n names.add(name.strip().lower())\n\n return names", "def open_read_file(file):\n\n file_name = set([])\n if (os.path.exists(file)):\n data = open(file, 'r')\n \n lines = data.readlines()\n # create an instance of IndexFileIO\n index_file_io = sys_functions.IndexFileIo()\n # print 'lines: '\n # print lines\n for line in lines:\n (name, date) = index_file_io.parse_index_line(line)\n file_name.add(name)\n \n #sp_line = string.split(line)\n #if (sp_line != []):\n # name = sp_line[0]\n # file_name.add(name)\n\n # print file_name\n return file_name", "def get_names(filename):\n\n with open(filename, \"r\") as readFile:\n names = [each.split(',') for each in readFile] #Split at the commas\n names = list(names[0]) #Retrieves the list of names within the list names\n names = [each.strip(\"\\\"\") for each 
in names] #Strips each name of the double quotes\n return names", "def read_names(file_name):\n\twith open(file_name, 'r') as f:\n\t\tnames_raw = f.read()\n\tnames_raw = names_raw.split(',')\n\tnames_stripped = []\n\n\tfor name in names_raw:\n\t\tnames_stripped.append(name.strip('\\\"'))\n\tprint(names_stripped)\n\treturn names_stripped", "def read_names(path):\n return SortedSet([os.path.basename(n) for n in glob.glob(path + os.sep + '*')])", "def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)", "def _possible_names(self, filename):\n names = [filename]\n if not self._iszip(filename):\n for zipext in _file_openers.keys():\n if zipext:\n names.append(filename+zipext)\n return names", "def read_file(file_name):\n with open(file_name) as f:\n content = f.readlines()\n names = []\n dnas = []\n dna = \"\"\n name = \"\"\n for line in content:\n line = line.strip()\n if line[0] == \">\":\n names.append(name)\n dnas.append(dna)\n name = line[1:]\n dna = \"\"\n else:\n dna += line\n names.append(name)\n dnas.append(dna)\n\n return (names[1:], dnas[1:])", "def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names", "def read_screen_names(filename):\n flist = []\n f = open('candidates.txt')\n for line in f:\n \tflist.append(line.strip('\\n'))\t\n return flist", "def read_str_name(path):\r\n name = []\r\n name_stru = {}\r\n with open(path, \"r+\") as f:\r\n line = f.readlines()\r\n \r\n # to load the name to list. files\r\n for i in range(len(line)):\r\n \r\n if line[i][:-1] != '':\r\n \r\n name.append(line[i][:-1])\r\n else:\r\n \r\n name.append(line[i-1][:-1] + str())\r\n \r\n line[i] = line[i-1]\r\n \r\n # to remark the structure name\r\n for s in name:\r\n \r\n name_stru[s] = (name.count(s),name.index(s))\r\n \r\n for key,values in name_stru.items():\r\n \r\n if values[0] != 1:\r\n for i in range(values[0]):\r\n name[values[1]+i] = name[values[1]+i] + str(i+1)\r\n \r\n return name", "def read_names_list(file_path):\r\n\tnames_list = []\r\n\twith open(file_path) as file:\r\n\t for line in file:\r\n\t cline = line.rstrip().split()\r\n\t #row_id = cline[0]\r\n\t row_name = cline[1:]\r\n\t #names_list.append((row_id, \" \".join(row_name)))\r\n\t names_list.append(\" \".join(row_name))\r\n\treturn names_list", "def load_file(file_name):\n file = open(file_name, 'r')#open the file\n colors = file.read() #reads entire contents of the file and assigns it to names. 
This is the processing of the file\n file.close() #always close the file\n\n return colors", "def get(name, filename):\n\tlogging.info(\"Reading {} from {}\".format(name, filename))\n\tlogging.debug(\"Opening file\")\n\twith open(filename, \"r+\") as f:\n\t\treader = csv.reader(f)\n\t\tlogging.debug(\"Reading name/snippet from file\")\n\t\tin_file = False\n\t\tfor row in reader:\n\t\t\tif str(row[0]) == name:\n\t\t\t\tin_file = True\n\t\t\t\tprint row\n\t\tif in_file == False:\n\t\t\tprint \"That's not in this file\"\n\tlogging.debug(\"Read successful\")\n\treturn name, filename", "def _read_files(self):\n \n for langname in self.langnames:\n filename = f'data/word_lists/{langname}.txt'\n with open(filename) as f:\n index = self.langnames.index(langname)\n lang_list = getattr(self, f'word_list{index}')\n words = f.readlines()\n for word in words:\n fword = ''.join(char for char in word if char is not '\\n')\n lang_list.append(fword)\n f.close()\n return", "def load(filename: str) -> list:\n try:\n with open(filename) as in_file:\n loaded_txt = in_file.read().strip().split(\"\\n\")\n loaded_txt = [x.lower() for x in loaded_txt]\n return loaded_txt\n except IOError as e:\n print(\"{}\\nError opening {}. Terminating program.\".format(e, filename))", "def create_names_dict(infile):\n return [name.strip(\"\\n\") for name in open(infile, \"r\")]", "def parse_names(lines, oti_file_name):\n print \" * Parsing names\"\n # Read the real texture file names form the file.\n real_names = []\n if os.path.isfile(oti_file_name):\n with open(oti_file_name, \"rU\") as oti_fd:\n real_names = oti_fd.read().splitlines()\n\n names = {}\n for i, line in enumerate(lines):\n name = \".\"\n if i < len(real_names):\n name = real_names[i]\n names[\"%s\" % i] = {\"alias\": line, \"name\": name}\n return names" ]
[ "0.66513824", "0.6637792", "0.6586358", "0.6516887", "0.6475405", "0.64594156", "0.64459467", "0.632488", "0.62704265", "0.6254457", "0.62353885", "0.62281996", "0.62062573", "0.6203571", "0.6193799", "0.61811256", "0.6171002", "0.6165685", "0.61483276", "0.6145361", "0.6130439", "0.6129727", "0.61294776", "0.6069565", "0.60623825", "0.6051826", "0.60470235", "0.60224783", "0.6020715", "0.59834427" ]
0.68021345
0
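A quick, self-contained check of the readNames pattern above; it assumes readNames from that document is in scope and writes a throwaway sample file (the names are purely illustrative):

    from pathlib import Path

    Path("Files").mkdir(exist_ok=True)
    Path("Files/Names.txt").write_text("Ada\n\n  Grace \nAlan\n", encoding="utf8")

    print(readNames())  # expected: ['Ada', 'Grace', 'Alan'] -- blank lines skipped, whitespace stripped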
Method that reads the possible surnames from a file
def readSurnames(): surnamesRead = [] with open("Files/Surnames.txt", 'r', encoding='utf8') as f: for line in f: if line == "\n": continue surnamesRead.append(line.rstrip('\n').rstrip().lstrip()) f.close() return surnamesRead
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_surnames(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n surname = line.split('\\t')[1]\n result.append(surname)\n return result", "def _read_names_file(self):\n filename = os.path.join(self.path, 'names.csv')\n lookup = collections.defaultdict(list)\n with open(filename) as f:\n reader = csv.reader(f)\n for line in reader:\n matches = set(line)\n for match in matches:\n lookup[match].append(matches)\n return lookup", "def readNames():\n namesRead = []\n with open(\"Files/Names.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n namesRead.append(line.rstrip('\\n').rstrip().lstrip())\n f.close()\n return namesRead", "def load_names(path):\n global taxid_names, scientific_names, synonyms, lowercase_names\n with open(path, 'r') as r:\n for line in r:\n (taxid, name, unique, kind) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 3)\n if kind == 'scientific name':\n taxid_names[taxid] = name\n scientific_names[name] = taxid\n else:\n synonyms[name] = taxid\n lowercase_names[name.lower()] = taxid", "def load_strands_from_file(filename):\n allowed = \"{}{}{}{}\".format(Bases.Adenine, Bases.Cytosine, Bases.Thymine, Bases.Guanine)\n converted_strands = []\n\n with open(filename, 'r') as strands_file:\n return load_strands([strand for strand in strands_file]) # There may be a better way to do this.", "def read_names(male_names_file_path, female_names_file_path):\n\n names = set()\n\n with open(male_names_file_path, \"r\") as f1:\n for name in f1:\n names.add(name.strip().lower())\n\n with open(female_names_file_path, \"r\") as f2:\n for name in f2:\n names.add(name.strip().lower())\n\n return names", "def read_drama_names(drama_file):\n with open(drama_file, 'rb') as f:\n drama_list = [d.decode('utf-8').strip() for d in f.readlines()]\n return drama_list", "def read_name_file(filename):\n with open(filename, 'r') as f:\n names = f.read()\n names = names.split('\\n')\n names = list(filter(None, names))\n return names", "def read_screen_names(filename):\n flist = []\n f = open('candidates.txt')\n for line in f:\n \tflist.append(line.strip('\\n'))\t\n return flist", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n students = f.read().splitlines()\n return students", "def readFile(fileName):\n\tf = open(fileName, 'r')\n\tnames = map(lambda s: s[1:-1], f.read().split(','))\n\tnames.sort()\n\treturn names", "def process_file(file_name):\n f = open(\"babynames/\" + file_name, 'r')\n reader = csv.reader(f)\n return list(reader)", "def load_names(self):\n temp_names = []\n\n with open(self.NAMES_FILE) as f:\n for line in f:\n if len(line.strip()) > 0:\n temp_names.append(line.strip())\n\n return temp_names", "def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)", "def parse(self, file_name):\n try:\n fp = open(file_name, encoding='utf-8')\n trigrams = []\n\n for line in fp.readlines():\n for i in range(len(line) - 2):\n trigrams.append(line[i : i + 3].lower())\n\n fp.close()\n return trigrams\n except UnicodeDecodeError:\n print(\"Skipping file: \", file_name)\n return []", "def get_people(filename):\n\n lines = [line.rstrip('\\n').rstrip('\\r') for line in open(filename)]\n return lines", "def load_names(file_name: str) -> List[str]:\n full_path_name = os.path.join(os.getcwd(), 'names', file_name)\n with open(full_path_name, 'r') as file:\n names = file.read().rstrip('\\n').split(',')\n return sorted(names)", "def name_list(file_name):\n \n li = open(file_name)\n 
list_of_names = []\n\n for name in li:\n (first,last) = str.split(name,' ')\n list_of_names.append(Name(first,last))\n return list_of_names", "def load_users(filename):\n with open(filename, 'rb') as f:\n for line in f:\n yield line.split(':', 1)[0]", "def read_file(file_name):\n with open(file_name) as f:\n content = f.readlines()\n names = []\n dnas = []\n dna = \"\"\n name = \"\"\n for line in content:\n line = line.strip()\n if line[0] == \">\":\n names.append(name)\n dnas.append(dna)\n name = line[1:]\n dna = \"\"\n else:\n dna += line\n names.append(name)\n dnas.append(dna)\n\n return (names[1:], dnas[1:])", "def read_names(file_name):\n\twith open(file_name, 'r') as f:\n\t\tnames_raw = f.read()\n\tnames_raw = names_raw.split(',')\n\tnames_stripped = []\n\n\tfor name in names_raw:\n\t\tnames_stripped.append(name.strip('\\\"'))\n\tprint(names_stripped)\n\treturn names_stripped", "def read_all_names(start,end,gender,datadir):\r\n names = []\r\n name_dict = {}\r\n\r\n for year in range(start,end+1):\r\n year_names = []\r\n year = str(year)\r\n try:\r\n path = r\"{0}/yob{1}.txt\".format(datadir,year)\r\n f = open(path)\r\n reader = csv.reader(f)\r\n rank = 1\r\n for name in reader:\r\n if name[1].upper() == gender.upper():\r\n #first time name appears\r\n if name[0] not in name_dict:\r\n name_dict[name[0]] = {}\r\n name_dict[name[0]].update({'gender':gender.upper()})\r\n name_dict[name[0]].update({'intro_year_pop':int(name[pop_index])})\r\n name_dict[name[0]].update({'intro_year':year})\r\n name_dict[name[0]].update({'intro_year_rank':rank})\r\n name_dict[name[0]].update({'pops':{year:int(name[pop_index])}})\r\n name_dict[name[0]].update({'ranks':{year:rank}})\r\n #every time the name appears\r\n name_dict[name[0]]['pops'].update({year:int(name[pop_index])})\r\n name_dict[name[0]]['ranks'].update({year:rank})\r\n rank = rank + 1\r\n year_names.append(name[name_index])\r\n names.extend(year_names)\r\n f.close()\r\n except IOError:\r\n \tprint(\"{0} not found\".format(path))\r\n return name_dict", "def getFile(filename):\n f = open(filename)\n lines = f.readlines()\n f.close()\n names = []\n names.append(\"\")\n for name in lines[0].split(','):\n \n names.append(name[1:-1])\n names = sorted(names)\n return names", "def get_names(filename):\n\n with open(filename, \"r\") as readFile:\n names = [each.split(',') for each in readFile] #Split at the commas\n names = list(names[0]) #Retrieves the list of names within the list names\n names = [each.strip(\"\\\"\") for each in names] #Strips each name of the double quotes\n return names", "def read_locations(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n key_firms = db['nif']\n year = db['year']\n locs = db['locations']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, key_firms, year, locs, methodvalues", "def extract_names(filename):\n # +++your code here+++\n # Opening the file\n f = open(filename, 'rU')\n # Reading all of the lines\n lines = f.readlines()\n # Empty list to hold the year, names, and ranks\n ranks_names = []\n for line in lines:\n # search for the year\n year = re.search(r'\\s(\\d\\d\\d\\d)</h3>', line)\n # if the year is found, append it to the list\n if year: \n ranks_names.append(year.group(1))\n # search for the rank, male name, and female name\n rank_male_female = re.search(r'(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>', line)\n # If they are found then append the male name plus its rank, as well as the \n # female name plus its rank\n if rank_male_female:\n 
ranks_names.append(rank_male_female.group(2) + ' ' + rank_male_female.group(1))\n ranks_names.append(rank_male_female.group(3) + ' ' + rank_male_female.group(1))\n # Sort the list alphabetically\n ranks_names.sort()\n # Return the list\n return ranks_names", "def simple_read_words(filename=\"nietzsche.txt\"):\n with open(\"nietzsche.txt\", \"r\") as f:\n words = f.read()\n return words", "def getUsers(users_file):\n user_names = tuple(open(users_file, 'r'));\n for user_name in user_names:\n clean_user_name = user_name.rstrip(\"\\n\")\n listQuestions(clean_user_name)", "def load_people(self, file_path):\n pass", "def build_basenames():\r\n dict = {}\r\n with open(STREETS_FILE) as file:\r\n for line in file:\r\n dict[line.strip()] = True\r\n return dict" ]
[ "0.70901465", "0.6467122", "0.6426788", "0.640283", "0.6300027", "0.6267863", "0.62439567", "0.61844873", "0.61413", "0.61241", "0.6024531", "0.60224545", "0.5978379", "0.59721935", "0.5935341", "0.59213847", "0.5903446", "0.5901011", "0.5898721", "0.58916605", "0.5885895", "0.5863155", "0.5820284", "0.57733995", "0.57472056", "0.5731114", "0.5729814", "0.57274437", "0.5715967", "0.57068276" ]
0.7767952
0
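readSurnames above repeats the readNames logic with only the file path changed, so both could sit on one shared helper. This is a refactoring sketch under that observation, not the project's actual API (whitespace-only lines are treated as blank here):

    def _read_list(path):
        # Shared reader: skip blank lines, strip surrounding whitespace.
        with open(path, encoding="utf8") as f:
            return [line.strip() for line in f if line.strip()]

    def readNames():
        return _read_list("Files/Names.txt")

    def readSurnames():
        return _read_list("Files/Surnames.txt")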
Method that reads the possible vaccines from a file
def readVaccines(): vaccinesRead = [] with open("Files/Vaccines.txt", 'r', encoding='utf8') as vaccine_file: for vaccine_lines in vaccine_file: vaccineDetails = vaccine_lines.split(",") details = [] for vaccineDetail in vaccineDetails: details.append(vaccineDetail.lstrip().rstrip().rstrip('\n')) vaccinesRead.append(details) vaccine_file.close() return vaccinesRead
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ReadAndTokenize(filename):\n global CACHE\n global VOCABULARY\n if filename in CACHE:\n return CACHE[filename]\n comment = open(filename).read()\n words = Tokenize(comment)\n\n terms = collections.Counter()\n for w in words:\n VOCABULARY[w] += 1\n terms[w] += 1\n\n CACHE[filename] = terms\n return terms", "def mina2_reader():\n with open(MINA2_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"第\") != -1 and voc_line.find(\"课\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n elif voc_line != \"\\n\" and voc_line.find(\"会 話\") == -1 and voc_line.find(\"読み物\") == -1:\n voc_line.strip()\n\n voc_split = voc_line.split(\"\\t\")\n\n if len(voc_split) < 2:\n continue\n\n voc_dict = {\n \"Voc\": voc_split[0],\n \"Ext\": voc_split[1],\n \"Type\": \"\",\n \"Meaning\": voc_split[2]\n }\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list", "def read_file(filename, tokenizer, is_cased):\n sents = []\n with open(filename) as f:\n for line in f:\n sents.append(tokenizer(line, is_cased))\n return sents", "def read_vasp(file_path):\r\n with open(file_path, 'r') as f:\r\n return eval(f.read())", "def read_file(self,filename):\n\n f = open(filename,'r')\n lines = f.readlines()\n f.close()\n\n sequences = [l.strip() for l in lines if l.strip() != \"\"]\n\n self.load_sequences(sequences)", "def readRosetta(self, file):\n\n\t\tself.readPDB(file)\n\n\t\ttry:\n\t\t\tpdb = open(file, 'r')\t\n\t\texcept:\n\t\t\tprint \"unable to open file\"\n\t\t\treturn\n\n\t\tbReadBack = 0\n\t\tbReadChi = 0\n\t\tchain = self.chain[0]\n\t\tfor line in pdb.readlines():\n\t\t\tline = string.rstrip(line)\n\n\t\t\tif line[0:8] == \"complete\":\n\t\t\t\tbReadBack = 1\n\t\t\t\tbReadChi = 0\n\t\t\t\tcontinue\n\n\t\t\tif line[0:14] == \"absolute decoy\":\n\t\t\t\tbReadChi = 1\n\t\t\t\tcontinue\n\n\t\t\tif bReadChi:\n\t\t\t\tif line[0:3] == \"res\":\n\t\t\t\t\tcontinue \n\n\t\t\t\tindex = int(line[0:4])\t\n\t\t\t\tmyres = chain.getResidue(index)\n\n\t\t\t\tmyres.chi1 = float(line[10:19])\n\t\t\t\tmyres.chi2 = float(line[20:29])\n\t\t\t\tmyres.chi3 = float(line[30:39])\n\t\t\t\tmyres.chi4 = float(line[40:49])\n\n\t\t\tif bReadBack:\n\t\t\t\tindex = int(line[0:4])\n\t\t\t\tmyres = chain.getResidue(index)\n\n\t\t\t\tmyres.ss = line[5:6]\n\t\t\t\tmyres.phi = float(line[8:17])\t\n\t\t\t\tmyres.psi = float(line[17:26])\n\t\t\t\tmyres.ome = float(line[26:35])", "def parse_file(self, path):\r\n return self._parse(antlr3.ANTLRFileStream(path))", "def readFastaFile(filename):", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def mina1_reader():\n with open(MINA1_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"大家日语\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n elif voc_line != \"\\n\":\n voc_line.strip()\n\n voc_split = 
voc_line.split(\"\\t\")\n while '' in voc_split:\n voc_split.remove('')\n\n if len(voc_split) < 3:\n continue\n\n voc_dict = {\n \"Ext\": voc_split[0],\n \"Voc\": voc_split[1],\n \"Type\": \"\",\n \"Meaning\": voc_split[2]\n }\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list", "def process_raw_phrases(file_path):", "def _read(self, file_path: str) -> Iterator[Instance]:\n with open(file_path) as f:\n for line in f:\n pairs = line.split()\n words, tags = zip(*(pair.split(\"###\") for pair in pairs))\n yield self.text_to_instance([Token(word) for word in words], tags)", "def read_manual_file(filename):\n if not filename:\n return\n print(\"Loading manual scansions...\")\n with open(filename, \"r\") as file:\n lines = file.readlines()\n lines = [line.rstrip(\"\\n\").split(\"\\t\") for line in lines]\n for line in lines:\n verse_key = Verse.get_verse_key(line[0])\n scansion = re.sub(r\"([^a-z_\\^*\\[\\]()])\", \" \", line[0].lower()).rstrip(\" \").lstrip(\" \")\n scansion = Scansion(re.sub(\" +\", \" \", scansion))\n if len(line) == 1:\n Verse.DICT[verse_key] = {\"scansion\": scansion, \"comment\": \"\"}\n else:\n Verse.DICT[verse_key] = {\"scansion\": scansion, \"comment\": line[1]}", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'SubBasins':\n self.read_subbasins(f)\n elif self.cleantag(line) == 'HRUs':\n self.read_HRUs(f)\n # Next line\n line = f.nexttag()", "def readPDB(self, file):\n\n\t\ttry:\n\t\t\tpdb = open(file, 'r')\n\t\texcept:\n\t\t\tprint \"cannot open pdbfile\",file\n\t\t\treturn 0\n\n\t\tself.file = file\n\t\tpresi = \"\"\n\t\tpresn = \"\"\n\t\tprevc = \"\"\n\t\tnlines = 0\n\t\tmychain = None\n\t\tmyres = Residue()\n\t\tterm = 1\n\t\trescore = re.compile(\"res aa Eatr\")\n\t\tre_bk_tot = re.compile(\"bk_tot\")\n\t\tre_fa_rep = re.compile(\"fa_rep\")\n\t\tre_fa_atr = re.compile(\"fa_atr\")\n\t\tbReadScore = False\n\t\tfor line in pdb.readlines():\n\t\t\tline = string.strip(line)\n\n\t\t\tif line[0:3] == \"REM\":\n\t\t\t\tself.addRemark(line)\n\n\t\t\tif line[0:3] == \"TER\":\n\t\t\t\tterm = 1\n\n\t\t\tif rescore.search(line):\n\t\t\t\tbReadScore = True\n\t\t\t\tcontinue\n\n\t\t\t# read rosetta residue-based scoring information\n\t\t\tif bReadScore:\n\t\t\t\tcols = line.split()\n\t\t\t\tif cols[0] == \"totals\":\n\t\t\t\t\tbReadScore = False\n\t\t\t\t\tcontinue\n\n\t\t\t\tmyres = self.getResidue(int(cols[0]))\n\t\t\t\tif myres == None:\n\t\t\t\t\tprint \"warning reading score!!! 
cannot find residue:\",cols[0]\n\n\t\t\t\tmyres.Eatr = float(cols[2])\n\t\t\t\tmyres.Erep = float(cols[3])\n\t\t\t\tmyres.Esol = float(cols[4])\n\t\t\t\tmyres.Edun = float(cols[7])\n\t\t\t\tmyres.EhbBB = float(cols[9])\n\t\t\t\tmyres.EhbSC = float(cols[10])\n\t\t\t\tmyres.Egb = float(cols[13])\n\t\t\t\tmyres.Ecst = float(cols[16])\n\t\t\t\tmyres.Eres = float(cols[17])\n\t\t\t\t\n\t\t\t# read atomic information\n\t\t\tif line[0:4] == \"ATOM\" or line[0:6] == \"HETATM\":\n\t\t\t\tchain = line[21:22]\n\t\t\t\tif chain != prevc or nlines == 0 or term:\n\t\t\t\t\tmychain = Chain()\n\t\t\t\t\tmychain.name = chain\n\t\t\t\t\tprevc = chain\n\t\t\t\t\tself.addChain(mychain)\n\t\t\t\t\n\t\t\t\tresi = line[22:26]\n\t\t\t\tresn = line[17:20]\n\n\t\t\t\tif nlines == 0 or presi != resi or presn != resn:\n\t\t\t\t\tif term:\n\t\t\t\t\t\tif myres:\n\t\t\t\t\t\t\tmyres.terminal = \"CTER\"\n\t\t\t\t\t\t\n\t\t\t\t\tpresi = resi\n\t\t\t\t\tpresn = resn\n\t\t\t\t\tmyres = Residue()\n\t\t\t\t\tmyres.name = line[17:20]\n\t\t\t\t\tmyres.file_id = resi\n\t\t\t\t\tmychain.addResidue(myres)\n\n\t\t\t\t\tif term:\n\t\t\t\t\t\tmyres.terminal = \"NTER\"\n\t\t\t\t\t\tterm = 0\n\n\t\t\t\tmyatom = Atom()\n\t\t\t\t\n\t\t\t\tif line[0:4] == \"HETA\":\n\t\t\t\t\tmyatom.kind = \"HETATM\"\n\t\t\t\telse:\n\t\t\t\t\tmyatom.kind = \"ATOM \"\n\n\t\t\t\tmyatom.name = line[12:16]\n\t\t\t\tmyatom.file_id = line[6:11]\n\t\t\t\tmyatom.local = line[29:30]\n\t\t\t\tmyatom.coord[0] = float(line[30:38])\n\t\t\t\tmyatom.coord[1] = float(line[38:46])\n\t\t\t\tmyatom.coord[2] = float(line[46:54])\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif len(line) >= 66:\n\t\t\t\t\ttmpstr = line[54:68]\n\t\t\t\t\ttmplst = tmpstr.split()\n\t\t\t\t\tmyatom.occupancy = float(tmplst[0])\n\t\t\t\t\tmyatom.bfactor = float(tmplst[1])\n\t\t\t\t\t#myatom.occupancy = float(line[54:60])\n\t\t\t\t\t#myatom.bfactor = float(line[60:66])\n\t\t\t\t\tmyatom.rest = line[66:len(line)]\n\n\t\t\t\tself.__determineElement(myatom)\n\t\t\t\tmyres.addAtom(myatom)\n\t\t\t\t\n\t\t\t\tnlines += 1\n\n\t########\tif re_bk_tot.search(line):\n\t########\t\tcols = line.split()\n\t########\t\tself.bk_tot = float(cols[1])\n\n\t########\tif re_fa_rep.search(line):\n\t########\t\tcols = line.split()\n\t########\t\tself.fa_rep = float(cols[1])\n\n\t########\tif re_fa_atr.search(line):\n\t########\t\tcols = line.split()\n\t########\t\tself.fa_atr = float(cols[1])\n\n\t\tpdb.close()", "def process_file():\n global distances_between_cities\n global number_of_cities\n global unvisited_cities\n\n text_file = open(sys.argv[1].strip('\\r'))\n distances_between_cities = [[int(i) for i in line.strip(\"\\r\\n\").split()[1:]] for line in text_file.readlines()[1:]]\n number_of_cities = len(distances_between_cities)\n\n # set the initial conditions of the problem (you have already visited madrid)\n unvisited_cities = range(number_of_cities)\n visit_city(0)", "def read_conll_pos_file(path):\n sents = []\n with open(path, \"r\") as f:\n curr = []\n for line in f:\n line = line.strip()\n if line == \"\":\n sents.append(curr)\n curr = []\n else:\n tokens = line.strip().split(\"\\t\")\n curr.append((tokens[1], tokens[3]))\n return sents", "def _read(self, file_name):\n f = open(file_name)\n lines = f.readlines()\n begin = 0\n end = 0\n while end < len(lines):\n op = ''\n for l in lines[begin:]:\n end += 1\n op = l.split()[0]\n if op in operations:\n self.operations.append(op)\n break\n if op == '=push':\n nfa = Automaton(lines[begin:end - 1])\n self.aut_to_push.append(nfa)\n begin = end\n f.close()", "def getVocabList():\n 
vocab_list = []\n with open('vocab.txt') as f_obj:\n while True:\n vocab_line = f_obj.readline()\n if not vocab_line:\n break\n word = re.search(r'\\t(\\w+)', vocab_line).group(1)\n vocab_list.append(word)\n return vocab_list", "def read_file(filename):\n\n all_documents = []\n document = []\n with tf.gfile.GFile(filename, \"r\") as reader:\n for line in reader:\n line = line.strip()\n if not line:\n continue\n if line.lower()[:7] == \"chapter\":\n if document:\n all_documents.append(document)\n document = []\n else:\n document.append(line)\n if document:\n all_documents.append(document)\n\n return all_documents", "def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data", "def read_file(path: str) -> Iterator[Problem]:\n with open(path) as f:\n txt = f.read()\n\n for encoded_problem in txt.split('\\n\\n'):\n yield parse_alpha_encoding(encoded_problem)", "def main(input_file, visualize):\n logging.info('Reading lines...')\n\n with open(input_file) as f:\n content = f.read()\n\n clauses, thesis = content.split('---\\n')\n\n logging.info('Parsing clauses...')\n parser = ClauseParser()\n parsed_clauses = parser.parse_cnf_list(clauses.splitlines())\n parsed_thesis = parser.parse_cnf_list(thesis.splitlines())\n\n result, tree = resolution(parsed_clauses, parsed_thesis)\n\n if visualize:\n display_resolution_tree(tree)\n\n logging.info(f'The thesis is {result}')", "def read_file(filename):\n\n all_documents = []\n document = []\n with tf.gfile.GFile(filename, \"r\") as reader:\n for line in reader:\n line = line.strip()\n line = tokenization.convert_to_unicode(line)\n line = line.replace(u\"\\u2018\", \"'\").replace(u\"\\u2019\", \"'\")\n sents = split_line_by_sentences(line)\n for sent_line in sents:\n if not sent_line or len(sent_line) < 4: # Arbitrary min length for line\n continue\n if sent_line.lower()[:7] == \"chapter\":\n if document:\n all_documents.append(document)\n document = []\n else:\n document.append(sent_line)\n if len(document) == FLAGS.max_para_length:\n all_documents.append(document)\n document = []\n if document:\n all_documents.append(document)\n\n # Remove small documents\n all_documents = [x for x in all_documents if len(x) >= 8]\n\n return all_documents", "def scan(self):\n self.tokfile = open(self.tokfile_path, 'w')\n word = ''\n for line in open(self.srcfile):\n for ch in line:\n if ch in alphanum: \n word += ch\n else:\n if word:\n try:\n self.print_tok('$int', int(word))\n except ValueError:\n if word in self.reserved: \n self.print_tok('$' + word)\n else:\n self.print_tok('$id', word)\n if ch in special:\n self.print_tok(ch)\n word = ''\n self.tokfile.close()", "def examples_from_file(path):\n examples = []\n\n # count total lines before loading\n total_lines = int(local('wc -l {}'.format(path), capture=True).split()[0])\n\n with codecs.open(path, 'r', encoding='utf-8') as f:\n for line in verboserate(f, desc='Reading data file.', total=total_lines):\n src, 
trg = line.strip().lower().split('\\t')\n src_words = src.split(' ')\n trg_words = trg.split(' ')\n assert len(src_words) > 0\n assert len(trg_words) > 0\n\n if use_diff:\n ex = EditExample.salient_diff(src_words, trg_words, free_set)\n else:\n ex = EditExample.whitelist_blacklist(src_words, trg_words)\n examples.append(ex)\n return examples", "def read(file):\n\n blocks = ['bus', 'load', 'fshunt', 'gen', 'branch', 'transf', 'area',\n 'twotermdc', 'vscdc', 'impedcorr', 'mtdc', 'msline', 'zone',\n 'interarea', 'owner', 'facts', 'swshunt', 'gne', 'Q']\n nol = [1, 1, 1, 1, 1, 4, 1,\n 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0]\n rawd = re.compile('rawd\\d\\d')\n\n retval = True\n version = 0\n b = 0 # current block index\n raw = {}\n for item in blocks:\n raw[item] = []\n\n data = []\n mdata = [] # multi-line data\n mline = 0 # line counter for multi-line models\n\n # parse file into raw with to_number conversions\n fid = open(file, 'r')\n for num, line in enumerate(fid.readlines()):\n line = line.strip()\n if num == 0: # get basemva and frequency\n data = line.split('/')[0]\n data = data.split(',')\n\n mva = float(data[1])\n freq = float(data[5])\n version = int(data[2])\n\n if not version:\n version = int(rawd.search(line).group(0).strip('rawd'))\n if version < 32 or version > 33:\n logging.warning('RAW file version is not 32 or 33. Error may occur.')\n continue\n elif num == 1: # store the case info line\n logging.info(line)\n continue\n elif num == 2:\n continue\n elif num >= 3:\n if line[0:2] == '0 ' or line[0:3] == ' 0 ': # end of block\n b += 1\n continue\n elif line[0] is 'Q': # end of file\n break\n data = line.split(',')\n\n data = [to_number(item) for item in data]\n mdata.append(data)\n mline += 1\n if mline == nol[b]:\n if nol[b] == 1:\n mdata = mdata[0]\n raw[blocks[b]].append(mdata)\n mdata = []\n mline = 0\n fid.close()\n\n # add device elements params and add to PSAT formatted dictionary\n\n for data in raw['bus']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10\n ID, NAME, BasekV, Type, Area Zone Owner Va, Vm, latitude longitude\n \"\"\"\n idx = data[0]\n ty = data[3]\n angle = data[8]\n try:\n lat = data[9]\n except:\n # logging.warning('<No Coordinates in .raw file>')\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5]]\n else:\n param = {'idx': idx,\n 'name': data[1],\n 'Vn': data[2],\n 'type': data[3],\n 'area': data[4],\n 'voltage': data[7],\n 'region': data[5],\n 'owner': data[6],\n 'angle': angle,\n 'latitude': data[9],\n 'longitude': data[10]\n }\n psatlist = [data[0], data[2], data[7], angle, data[4], data[5], data[9], data[10]]\n Settings.Bus.append(psatlist)\n Settings.BusNames.append(data[1])\n # Add BusSTORE Dictionary For Later Reference\n Settings.BusStore[idx] = param\n\n xcoord = [34.560040, 34.938385, 34.360040, 40.5152473, 40.3142473, 36.527401, 36.857401, 36.687401, 36.856401,\n 40.487041, 36.903901, 36.702901, 35.832561, 33.386047, 33.185047, 37.105571, 37.104154, 33.706718,\n 37.103549, 36.703539, 37.103559, 36.703549, 36.033561, 35.631561, 36.032561, 35.732561, 36.525401,\n 36.857401, 49.869314, 50.969314, 51.979314, 52.481674, 54.973192, 56.276212, 41.734596, 34.551015,\n 34.652015, 34.537507, 34.587507, 34.157904, 33.714453, 33.762453, 39.548160, 39.496160, 34.313143,\n 34.545782, 34.380686, 34.111686, 34.137762, 34.118650, 34.158650, 33.918650, 33.718650, 
34.018650,\n 34.018650, 34.018650, 34.018650, 34.018650, 34.312456, 34.315456, 34.243600, 34.566258, 34.565258,\n 46.064672, 46.565672, 45.514571, 45.606833, 45.806833, 44.890000, 45.596416, 45.295416, 45.891161,\n 47.954899, 46.511440, 45.913936, 45.713936, 46.669335, 47.954899, 47.624154, 43.784730, 44.482350,\n 42.006860, 42.934919, 42.731919, 43.013135, 44.068350, 43.558350, 42.438350, 42.938350, 44.068350,\n 43.558350, 43.048350, 42.638350, 44.068350, 43.558350, 43.048350, 42.638350, 43.620189, 39.120428,\n 40.398031, 35.216200, 35.215200, 36.202099, 39.777745, 39.539598, 37.052929, 35.403217, 35.352217,\n 36.807243, 39.567450, 40.807689, 40.806689, 41.008689, 39.555494, 37.954721, 38.406721, 38.906721,\n 38.656721]\n ycoord = [-109.277313, -110.303798, -109.777313, -107.546455, -107.546455, -108.325669, -108.654569, -108.486669,\n -108.325669, -107.185575, -111.390408, -111.390408, -111.448566, -112.860397, -112.659397, -108.243555,\n -108.441191, -112.322033, -111.590816, -111.190816, -111.190816, -111.590806, -111.648566, -111.248566,\n -111.249566, -111.647566, -108.655669, -108.323669, -122.150895, -122.150895, -122.150895, -121.61684,\n -121.924221, -122.21370, -108.790427, -117.568105, -117.538105, -118.607375, -118.658375, -118.280282,\n -118.146319, -118.096319, -112.52797, -112.72797, -118.690631, -118.389938, -118.478496, -118.478496,\n -118.299917, -118.095428, -118.095428, -118.095428, -118.095428, -118.195428, -118.395428, -117.995428,\n -117.795428, -117.995428, -118.481217, -118.891217, -118.391667, -117.166428, -117.368428, -106.60906,\n -106.80906, -122.681289, -121.114785, -122.113785, -123.29000, -121.312202, -121.114202, -106.612578,\n -118.997945, -112.88531, -120.692286, -120.693974, -119.571501, -120.997945, -122.219492, -118.77463,\n -121.019484, -121.316546, -114.419206, -114.419206, -120.956476, -120.79484, -120.93484, -121.216546,\n -121.156546, -121.215484, -121.135484, -121.255484, -121.175484, -121.013484, -120.733484, -121.053484,\n -120.973484, -118.865882, -122.073631, -122.263453, -120.847567, -120.900567, -120.129849, -122.142965,\n -122.262993, -121.021929, -119.450452, -119.450452, -121.779037, -122.276225, -122.135718, -121.935718,\n -121.935718, -121.24000, -121.18379, -121.10879, -121.27379, -121.23979]\n\n #for idx, line in enumerate(Settings.Bus):\n # line.extend([xcoord[idx], ycoord[idx]])\n\n maxV = 1.1\n minV = 0.9\n maxQ = 1\n minQ = 0\n convimp = 0\n status = 1\n loss = 1\n\n for data in raw['load']:\n \"\"\"version 32:\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n Bus, Id, Status, Area, Zone, PL(MW), QL (MW), IP, IQ, YP, YQ, OWNER\n \"\"\"\n\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n voltage = Settings.BusStore[busidx]['voltage']\n param = {'bus': busidx,\n 'Vn': vn,\n 'Sn': mva,\n 'p': (data[5] + data[7] * voltage + data[9] * voltage ** 2) / mva,\n 'q': (data[6] + data[8] * voltage - data[10] * voltage ** 2) / mva,\n 'owner': data[11],\n 'type': Settings.BusStore[busidx]['type'],\n 'voltage': voltage\n }\n\n psatlist = [busidx, mva, vn, param['p'], param['q'], maxV, minV, convimp, status]\n Settings.PQ.append(psatlist)\n \"\"\"CONFIRM THAT OTHER BUSES HAVE 0 P and 0 Q which are not added\"\"\"\n\n for data in raw['fshunt']:\n \"\"\"\n 0, 1, 2, 3, 4\n Bus, name, Status, g (MW), b (Mvar)\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n param = {'bus': busidx,\n 'Vn': vn,\n 'status': data[2],\n 'Sn': mva,\n 'g': data[3] / mva,\n 'b': data[4] / mva,\n }\n\n psatlist = [busidx, mva, vn, freq, param['g'], 
param['b'], param['status']]\n Settings.Shunt.append(psatlist)\n\n gen_idx = 0\n type = 6\n\n for data in raw['gen']:\n \"\"\"\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 12, 13, 14, 15, 16,17,18,19\n I,ID,PG,QG,QT,QB,VS,IREG,MBASE,ZR,ZX,RT,XT,GTAP,STAT,RMPCT,PT,PB,O1,F1\n \"\"\"\n busidx = data[0]\n vn = Settings.BusStore[busidx]['Vn']\n gen_mva = data[8]\n gen_idx += 1\n status = data[14]\n leak = 0\n param = {'Sn': gen_mva,\n 'Vn': vn,\n 'u': status,\n 'idx': gen_idx,\n 'bus': busidx,\n 'pg': status * data[2] / mva,\n 'qg': status * data[3] / mva,\n 'qmax': data[4] / mva,\n 'qmin': data[5] / mva,\n 'v0': data[6],\n 'ra': data[9], # ra armature resistance\n 'xs': data[10], # xs synchronous reactance\n 'pmax': data[16] / mva,\n 'pmin': data[17] / mva,\n }\n\n if Settings.BusStore[busidx]['type'] == 3: #Check Bus Type for Slack\n refangle = 0\n refBus = 1\n PGuess = 1\n swlist = [busidx, gen_mva, vn, param['v0'], refangle, param['qmax'], param['qmin'],\n maxV, minV, PGuess, loss, refBus, status]\n SW = swlist\n Settings.SW.append(swlist)\n Settings.SWStore[busidx] = param\n Settings.SynStore[busidx] = param\n continue\n\n if busidx not in Settings.BusStore.keys():\n \"\"\" Need data from .dyr file. Create initial list, then append data from .dyr\"\"\"\n else:\n # psatlist = [busidx, gen_mva, vn, freq, type, leak, param['ra'],param['xs']]\n # Syn.append(psatlist)\n Settings.SynStore[busidx] = param\n pvlist = [busidx, gen_mva, vn, param['pg'], Settings.BusStore[busidx]['voltage'],\n param['qmax'], param['qmin'], maxV, minV, loss, status]\n Settings.PV.append(pvlist)\n\n\n for data in raw['branch']:\n \"\"\"\n I,J,ID,R,X,B,RATEA,RATEB,RATEC,GI,BI,GJ,BJ,ST,LEN,O1,F1,...,O4,F4\n \"\"\"\n param = {'bus1': data[0],\n 'bus2': data[1],\n 'id' : data[2],\n 'r': data[3],\n 'x': data[4],\n 'b': data[5],\n 'rate_a': data[6],\n 'rate_b': data[7],\n 'rate_c': data[8],\n 'Vn': Settings.BusStore[data[0]]['Vn'],\n 'Vn2': Settings.BusStore[data[1]]['Vn'],\n 'length': data[14],\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n 'status': data[13]\n }\n\n psatlist = [param['bus1'], param['bus2'], param['rate_c'], param['Vn'], freq, EMPTY,\n param['length'], param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['status']]\n Settings.Lineij.append([data[0], data[1], data[2]])\n Settings.Lineji.append([data[1], data[0], data[2]])\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.branches += 1\n Settings.linecount += 1\n Settings.LineBusMatij[param['bus2']].append(Settings.branches)\n Settings.LineBusMatji[param['bus1']].append(Settings.branches)\n\n for data in raw['transf']:\n \"\"\"\n I,J,K,CKT,CW,CZ,CM,MAG1,MAG2,NMETR,'NAME',STAT,O1,F1,...,O4,F4\n R1-2,X1-2,SBASE1-2\n WINDV1,NOMV1,ANG1,RATA1,RATB1,RATC1,COD1,CONT1,RMA1,RMI1,VMA1,VMI1,NTP1,TAB1,CR1,CX1\n WINDV2,NOMV2\n \"\"\"\n if len(data[1]) < 5:\n ty = 2\n else:\n ty = 3\n if ty == 3:\n continue\n # raise NotImplementedError('Three-winding transformer not implemented')\n\n tap = data[2][0]\n phi = data[2][2]\n\n if tap == 1 and phi == 0:\n trasf = False\n else:\n trasf = True\n param = {'trasf': trasf,\n 'bus1': data[0][0],\n 'bus2': data[0][1],\n 'u': data[0][11],\n 'b': data[0][8],\n 'r': data[1][0],\n 'x': data[1][1],\n 'tap': tap,\n 'phi': phi,\n 'rate_a': data[2][3],\n 'Vn': Settings.BusStore[busidx]['Vn'],\n 'Vn2': Settings.BusStore[busidx]['Vn'],\n # 'length': data[?][?], FIND CORRECT INDEX\n 'Ilim': EMPTY,\n 'Plim': EMPTY,\n 'Slim': EMPTY,\n }\n psatlist = [param['bus1'], param['bus2'], 
param['rate_a'], param['Vn'], freq, EMPTY,\n EMPTY, param['r'], param['x'], param['b'], param['Ilim'], param['Plim'], EMPTY, EMPTY,\n param['Slim'], param['u']]\n\n Settings.LineOrd[param['bus1']].append(psatlist)\n Settings.linecount += 1\n Settings.transformers += 1\n # ADD Line Data(All Branch Types) to Sys Param Dict after .dyr Transformer Data Added\n # Re-Order Line Data for correct sequence\n for key in Settings.LineOrd:\n for item in Settings.LineOrd[key]:\n Settings.Line.append(item)\n\n for data in raw['area']:\n Settings.Areas.append(data[4])\n\n for data in raw['zone']:\n Settings.Regions.append(data[1])\n\n return retval", "def read(fileName,filetransform):\n\n with open(fileName) as f:\n for li in f:\n li = li.strip()\n print(\"\")\n choice = input(\"You want to 'ENCRYPT' or 'DECRYPT : ' \" + li )\n print(\"\")\n with open(filetransform) as f:\n for line in f:\n line = line.strip()\n if \";\" in line:\n simulate(li,choice,line)\n else:\n userchoice(li,choice,line)", "def read_in_file(self, file):\n with open(self.file) as doc:\n trie = dict()\n for line in doc:\n line = line.split(' ')[0]\n self.make_trie(trie, line.rstrip())\n return trie" ]
[ "0.59768933", "0.5921746", "0.5887054", "0.5878683", "0.5846625", "0.58407384", "0.58056444", "0.5785878", "0.5751106", "0.57284236", "0.5727432", "0.5689564", "0.5685143", "0.56707704", "0.56541115", "0.5645458", "0.5632656", "0.56321144", "0.56046677", "0.55909306", "0.557193", "0.5569897", "0.55533683", "0.55276847", "0.55160534", "0.5498457", "0.54946786", "0.5486191", "0.5484421", "0.5473975" ]
0.71953326
0
Method that finds all the Person nodes in the database
def findAllPerson(tx):
    query = (
        "MATCH (p:Person) "
        "RETURN p , ID(p);"
    )
    results = tx.run(query).data()
    return results
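A minimal usage sketch for the finder above, assuming the official neo4j Python driver; the connection URI, credentials, and variable names are illustrative assumptions, not part of this record. The finder is handed to the session as a transaction function, and tx.run(query).data() returns a list of dicts whose keys mirror the RETURN expressions.

from neo4j import GraphDatabase  # assumed dependency; not shown in the record above

# Hypothetical connection details, for illustration only.
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

with driver.session() as session:
    # read_transaction retries the callback on transient failures;
    # newer driver versions expose the same behaviour as execute_read.
    people = session.read_transaction(findAllPerson)

for record in people:
    # Keys follow the RETURN clause: 'p' is the node, 'ID(p)' its internal id.
    print(record["ID(p)"], dict(record["p"]))

driver.close()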
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_persons(self):\r\n return self.__person_repository.elements", "def get_people(self):\n cursor = self.cur()\n cursor.execute('SELECT * FROM {tn} '.format(tn=\"person\"))\n all_people = cursor.fetchall()\n return all_people", "def list_people():\n\n person_list = []\n for person in person_database:\n person_list.append(person)\n return person_list", "def populate_person(root) :\n\tfor person in root.findall(\"Person\"):\n\t\ttemp_person = Person()\n\t\ttemp_person.person_ID = person.get(\"ID\")\n\t\ttemp_person.name = person.get(\"Name\")\n\n\t\tif person.find(\"Kind\") is not None :\n\t\t\ttemp_person.kind = person.find(\"Kind\").text\n\t\tif person.find(\"Location\") is not None :\n\t\t\ttemp_person.location = person.find(\"Location\").text\n\n\t\tfor crisis in person.iter(\"Crisis\") :\n\t\t\ttemp_relations = Relations()\n\t\t\tcheck = Relations.objects.filter(crisis_ID = crisis.get(\"ID\"), person_ID = person.get(\"ID\"))\n\t\t\tif len(check) == 0:\n\t\t\t\ttemp_relations.populate(c_id = crisis.get(\"ID\"), p_id = person.get(\"ID\"))\n\t\t\t\ttemp_relations.save()\n\n\t\tfor org in person.iter(\"Org\") :\n\t\t\ttemp_relations = Relations()\n\t\t\tcheck = Relations.objects.filter(org_ID = org.get(\"ID\"), person_ID = person.get(\"ID\"))\n\t\t\tif len(check) == 0:\n\t\t\t\ttemp_relations.populate(p_id = person.get(\"ID\"), o_id = org.get(\"ID\"))\n\t\t\t\ttemp_relations.save()\n\n\t\tpopulate_common(person, person.get(\"ID\"), temp_person)\n\t\ttemp_person.save()", "def persons(self):\r\n return persons.Persons(self)", "def get_all_neighbor_nodes_person(node: Node) -> list:\n if node is None:\n return []\n\n personroot = get_personroot_node(node)\n if personroot is None:\n return []\n\n neighbor_nodes = get_all_neighbor_nodes(personroot, category_want='person')\n neighbor_nodes.append(personroot)\n\n return neighbor_nodes", "def get_persons(self):\n return self.person_list.model().get_person_list()", "def read_people():\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person LIMIT {0};\".format(settings.search_result_row_limit))\n p = []\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n p.append(_person)\n conn.close()\n return p\n except:\n return []", "def get_all_personroot_nodes(node: Node) -> list:\n if node is None:\n return []\n\n if node['category'] != 'person':\n return []\n\n personroot_nodes = []\n if node['name'] == 'person-root':\n personroot_nodes.append(node)\n return personroot_nodes\n\n edges = get_edges(node)\n if len(edges) == 0:\n print('get_personroot_node(): warning, \"person\" node with _key \"' + node['_key'] + '\"')\n print(' has 0 neighbors, that should not happen, continuing...')\n return []\n\n for edge in edges:\n next_node = edge.end_node\n if node == next_node:\n continue\n if next_node['name'] == 'person-root':\n personroot_nodes.append(next_node)\n continue\n\n return personroot_nodes", "def get_all_persons_list(self):\n self.__load_persons_from_file_into_memory()\n return super().get_all_persons_list()", "def findAll(tx):\n query = (\n \"MATCH (n1)-[r]->(n2) \"\n \"RETURN n1 AS node1 , r AS relationship , n2 AS node2 \"\n 
)\n\n result = tx.run(query)\n return [(record[\"node1\"], record[\"relationship\"], record[\"node2\"]) for record in result]", "def select_all_nodes(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes\")\n \n rows = cur.fetchall()\n return rows", "def select_all_persons(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM person\")\n\n rows = cur.fetchall()\n\n return rows # return the rows", "def find_all(self):\n pass", "def find_objs(self, cls, **attr):\n nodes = getattr(self.graph, getattr(models, cls).element_plural).query(**attr).all()\n return nodes", "def get_nodes(self):\n pass", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def get_persons(self, language=None):\n return self.get_direct_related_page_extensions(\n Person, PersonPluginModel, language=language\n )", "def view_all_persons():\n message = ''\n global conn\n with conn:\n rows = select_all_persons(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Person Table', message)", "def find_all(self):", "def __ui_list_all_persons(self):\n persons_list = self.__person_service.service_get_persons_list()\n\n if len(persons_list) == 0:\n print(\"The list of persons is empty!\")\n else:\n print(\"The list of persons in your agenda:\")\n for person in persons_list:\n print(\" \" + str(person))\n print(\"\")", "def nodes(self):\n return self._get_tree_queryset()", "def iter_nodes(self):", "def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db", "def findall_nodes(self):\n\n nodes = []\n for n in self.nodes:\n nodes += n.findall_forward()\n\n # Make sure list only contains every element once\n nodes = dict((k,1) for k in nodes)\n self.nodes = list(nodes.keys())\n self.connect_backwards()", "def nodes(self): \n return [n for n in self.iternodes()]", "def get_persons(self):\n response = self.do_request('/management/persons/export/json/')\n if response:\n return response.json()", "def persons(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/persons'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def all_persons(self):\n all_persons = {}\n all_persons.update(self.staff)\n all_persons.update(self.fellows)\n return all_persons", "def findAllHome(tx):\n query = (\n \"MATCH (h:House) \"\n \"RETURN 
h , ID(h);\"\n )\n results = tx.run(query).data()\n return results" ]
[ "0.7161175", "0.70688295", "0.6628899", "0.6604041", "0.658471", "0.65612173", "0.6507352", "0.6468968", "0.6391832", "0.62645686", "0.6208956", "0.61638427", "0.6148602", "0.6058281", "0.6046598", "0.59861124", "0.5982131", "0.5963654", "0.59491247", "0.5935303", "0.5908394", "0.5894036", "0.58805645", "0.587908", "0.5875136", "0.5832542", "0.5831576", "0.58159477", "0.581338", "0.57868916" ]
0.7555326
0
Method that finds all the House nodes in the database
def findAllHome(tx):
    query = (
        "MATCH (h:House) "
        "RETURN h , ID(h);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findall_nodes(self):\n\n nodes = []\n for n in self.nodes:\n nodes += n.findall_forward()\n\n # Make sure list only contains every element once\n nodes = dict((k,1) for k in nodes)\n self.nodes = list(nodes.keys())\n self.connect_backwards()", "def select_all_nodes(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes\")\n \n rows = cur.fetchall()\n return rows", "def get_nodes(self):\n pass", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def getNodes(self):\n return self.__allNodes", "def getOthNodes( self ):\n\n if self.othNodes:\n return self.othNodes.keys()\n\n if not self.othNames:\n self.getOthNames( )\n\n for id1 in self.othNames.values():\n nNodes = self.adb.get(\t \"nOthNodes\", id1\t)\n for id3 in range(nNodes):\n nd = self.adb.get(\t \"othNode\", id1,id3 )\n self.othNodes[ nd ] = id3\n self.othNodes[ str( nd ) ] = id3\n\n return self.othNodes.keys()", "def get_all_nodes(self):\n return self._get_all_nodes()", "def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()", "def nodes(self): \n return [n for n in self.iternodes()]", "def find_all(self):\n pass", "def get_hnodes(self,h):\n t_nodes = self.get_h(h)\n for t_node in t_nodes:\n t_node = self.tree.get_node(t_node)\n self.check_childs(t_node.identifier)", "def find_all(self):", "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def getNodes(self):\n nodes = [{\"address\": \"http://0.0.0.0:100\"}\n ,{\"address\": \"http://0.0.0.0:200\"}\n ,{\"address\": \"http://0.0.0.0:300\"}\n ,{\"address\": \"http://0.0.0.0:400\"}\n ,{\"address\": \"http://0.0.0.0:500\"}]\n return nodes", "def iter_nodes(self):", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def select_all_topologies(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM topologies_topology\")\n \n rows = cur.fetchall()\n \n for row in rows:\n print(row)", "def findAllLiveRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:LIVE]->(n2:House) 
\"\n \"RETURN ID(n1) , r , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def nodes(self):\n return self._get_tree_queryset()", "def get_all_neighbor_nodes_person(node: Node) -> list:\n if node is None:\n return []\n\n personroot = get_personroot_node(node)\n if personroot is None:\n return []\n\n neighbor_nodes = get_all_neighbor_nodes(personroot, category_want='person')\n neighbor_nodes.append(personroot)\n\n return neighbor_nodes", "def find_objs(self, cls, **attr):\n nodes = getattr(self.graph, getattr(models, cls).element_plural).query(**attr).all()\n return nodes", "def test_get_nodes(self):\n wp22_rdf_graph = parse_rdf(WP22)\n wp706_rdf_graph = parse_rdf(WP706)\n wp1871_rdf_graph = parse_rdf(WP1871)\n wp2799_rdf_graph = parse_rdf(WP2799)\n\n nodes_wp22 = _get_nodes(wp22_rdf_graph)\n nodes_wp706 = _get_nodes(wp706_rdf_graph)\n nodes_wp1871 = _get_nodes(wp1871_rdf_graph)\n nodes_wp2799 = _get_nodes(wp2799_rdf_graph)\n\n self.assertEqual(len(nodes_wp22), 17)\n self.assertEqual(len(nodes_wp706), 186)\n self.assertEqual(len(nodes_wp1871), 115)\n self.assertEqual(len(nodes_wp2799), 141)", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def fusion_api_get_ha_nodes(self, uri=None, param='', api=None, headers=None):\n return self.ha_nodes.get(uri=uri, api=api, headers=headers, param=param)", "def get_all_metadata(self):\n return self.db.get_all_nodes()", "async def test_list_entity_neighbors(self):\n await test_service.list_entity_neighbors(self)", "def get_neighbours(self):\n return []", "def get_root_nodes(self):\n\n selector = \"forest\"\n desc_uids = self[selector, \"desc_uid\"]\n rids = np.where(desc_uids == -1)[0]\n for rid in rids:\n yield self.get_node(selector, rid)", "def nodes( self, data = False ):\n return self._G.nodes(data = data)" ]
[ "0.62917864", "0.6276443", "0.59888303", "0.5921003", "0.5874497", "0.58430326", "0.5826976", "0.5783992", "0.577711", "0.5747573", "0.5735488", "0.572251", "0.57179743", "0.5706575", "0.5700117", "0.56823605", "0.56663543", "0.56605154", "0.5656091", "0.56519186", "0.5634304", "0.5626857", "0.56194335", "0.561017", "0.5591521", "0.5584912", "0.55739725", "0.55619293", "0.5561851", "0.5555473" ]
0.6946899
0
Method that finds all the Location nodes in the database
def findAllLocation(tx):
    query = (
        "MATCH (l:Location) "
        "RETURN l , ID(l);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_locations(self):", "def get_all_locations():\n rs = run_query('''select * from zlrz_office_location''')\n return [] if rs is None else list(map(lambda t: Location(t[1], t[2], t[3], t[4], t[5], t[0]), rs))", "def locations(self):\n return self.data.get(\"locations\", [])", "def create_locations(self):\n nodes = self.tree.xpath(self.pattern.xpath_locations)\n seq_num = 0\n\n locations = []\n for node in nodes:\n content = self.create_content(node)\n\n if content:\n location = self.create_location(content, seq_num, node)\n\n # Store the location node in the location object, for referencing in future\n # processing, if necessary\n location.node = node\n locations.append(location)\n seq_num += 1\n\n return locations", "def findAllLocations(cls):\r\n return cls.query.all()", "def get_location_list(self) -> DBRecList:\n raise NotImplementedError('not implemented')", "def get_locations(self):\n self.locations = {} # reset dictionary\n for node in self.extant_p:\n if node.host not in self.locations:\n self.locations.update({node.host: []})\n self.locations[node.host].append(node)", "def locations(self):\n return self.__locations", "def locations(self):\n return self.__locations", "def locations(self):\n return self.__locations", "def locations(self):\n return self.__locations", "def locations(self):\n return self.__locations", "def locations(self):\n return self.__locations", "def locations(self):\n return self.__locations", "def get_locations(self):\n try:\n output_json = {}\n total_locations = list(self.mongo_db_object.find_all(AppConfigurations.MONGO_DATABASE,\n AppConstants.LOCATION.MONGO_LOCATION_COLLECTION_NAME))\n output_json = total_locations\n return AppConstants.result_success_template(output_json)\n\n except Exception as e:\n print(\"Error while fetching the Location Data.\", str(e))", "def locationsFromDBS(self, dbs, dataItems):\n result = defaultdict(set)\n for dataItem in dataItems:\n try:\n if isDataset(dataItem):\n phedexNodeNames = dbs.listDatasetLocation(dataItem)\n else:\n phedexNodeNames = dbs.listFileBlockLocation(dataItem)\n result[dataItem].update(phedexNodeNames)\n except Exception as ex:\n self.logger.error('Error getting block location from dbs for %s: %s', dataItem, str(ex))\n\n # convert the sets to lists\n for name, nodes in viewitems(result):\n psns = set()\n psns.update(self.cric.PNNstoPSNs(nodes))\n result[name] = list(psns)\n\n return result", "def locations(self):\n return self._locations", "def getLocations(nodes, urls):\n\ttheurls = dict((u, urls[u]) for u in nodes)\n\tloclist = [urllib.parse.urlparse(url).netloc for url in theurls]", "def get_all_locations(self,warehouse=False,location=False):\n ## finding all location along with it's child locations for given warehouse\n all_locations=[]\n if warehouse:\n locations= self.env['stock.location'].search([('usage','=','internal'),('id','in',warehouse.view_location_id.child_ids.ids)])\n else:\n locations = location\n\n for location in locations:\n child_locations_list = self.get_child_locations(location)\n if child_locations_list:\n child_locations_list = set(child_locations_list)\n ## adding all child in to one list \n for child in child_locations_list:\n all_locations.append(child) \n return all_locations", "def getAllLocation(table):\n\tlocs = []\n\n\tnum = len(table)\n\n\tfor i in range(num):\n\t\t# first field is the name\n\t\tloc = getLatAndLong(table[i][1])\n\n\t\tlocs.append(loc)\n\n\treturn locs", "def locations():\n sql = \"\"\"SELECT DISTINCT sample_location\n FROM barcodes.sample\n ORDER BY 
sample_location\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql)\n return pm.sql.TRN.execute_fetchflatten()", "def load(self):\n return list(self.obj.locations_set.all())", "def get_locations(self) -> list:\n return self.client.locations.get_all()", "def _get_locations(self):\n data = self._get(\"/locations\")\n if \"locations\" not in data:\n _LOGGER.error(\"Did not find locations\")\n raise AirthingsError(data)\n return [AirthingsLocation(d, self) for d in data[\"locations\"]]", "def locations(self):\r\n return Locations(self)", "def get_all_locations():\n with mysql.db_session(read_only=True) as session:\n locations = session.query(Location)\n\n if not locations:\n return response.create_not_found_response(message='No data found.')\n locations_list = [location.to_dict() for location in locations.all()]\n\n return response.Response(message=locations_list)", "def locations(self):\n node = self.dismod_file.node\n assert not ({\"node_id\", \"node_name\", \"parent\"} - set(node.columns))\n if \"c_location_id\" not in node.columns:\n node = node.assign(c_location_id=node.node_id)\n location_map = node[[\"node_id\", \"c_location_id\"]].rename(\n columns={\"node_id\": \"parent\", \"c_location_id\": \"parent_location_id\"})\n parent_location = node.merge(\n location_map, on=\"parent\", how=\"left\")\n missing = parent_location[parent_location.parent_location_id.isna()]\n if len(missing) > 1: # Root will have nan for parent.\n raise ValueError(f\"parent location IDs unknown {missing}\")\n return parent_location.rename(columns=dict(\n parent_location_id=\"parent_id\", c_location_id=\"location_id\",\n node_name=\"name\"\n ))[[\"parent_id\", \"location_id\", \"name\", \"node_id\"]]", "def locations(self):\r\n return resource.Location(self)", "def get_child_locations(self,location):\n child_list=[]\n child_list.append(location.id)\n child_locations = self.env['stock.location'].search([('usage','=','internal'),('location_id','=',location.id)])\n if child_locations:\n for child_location in child_locations:\n child_list.append(child_location.id)\n ## recursive calling to find child of child lcoations\n children_loc = self.get_child_locations(child_location)\n ## adding child into one list\n for child in children_loc:\n child_list.append(child)\n return child_list", "def get_locations(db_path: str) -> List[Location]:\n locations: List[Location] = []\n conn: Connection = sqlite3.connect(path.join(db_path, 'company_data.db'))\n cur: Cursor = conn.cursor()\n for row in cur.execute('SELECT name, area, climate FROM locations'):\n locations.append(Location(row[0], row[1], Climate(row[2])))\n\n cur.close()\n conn.close()\n return locations" ]
[ "0.774577", "0.6802821", "0.67363733", "0.6704429", "0.67019314", "0.66809297", "0.66094553", "0.6573647", "0.6573647", "0.6573647", "0.6573647", "0.6573647", "0.6573647", "0.6573647", "0.6506568", "0.6504786", "0.6484652", "0.6475976", "0.64413804", "0.63739413", "0.62944204", "0.62699264", "0.62626994", "0.62403786", "0.62365884", "0.6220317", "0.61718726", "0.61326444", "0.60851806", "0.6036262" ]
0.7687341
1
Method that finds all the Vaccine nodes in the database
def findAllVaccine(tx):
    query = (
        "MATCH (v:Vaccine) "
        "RETURN v , ID(v);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def listVrayNodes():\r\n return [node.name() for node in nuke.allNodes() if 'VRay' in node.Class()]", "def get_all_v(self) -> dict:\n return self.Nodes", "def get_all_v(self) -> dict:\n return self.nodes", "def getNodes(self):\n return self.__allNodes", "def get_nodes(self):\n pass", "def nodes(self): \n return [n for n in self.iternodes()]", "def getNodes(self):\n data = self.connect('get','nodes',None)\n return data", "def getVaccinesId(tx):\n query = (\n \"MATCH (v:Vaccine)\"\n \"RETURN ID(v)\"\n )\n\n idsList = tx.run(query).data()\n return idsList", "def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()", "def findall_nodes(self):\n\n nodes = []\n for n in self.nodes:\n nodes += n.findall_forward()\n\n # Make sure list only contains every element once\n nodes = dict((k,1) for k in nodes)\n self.nodes = list(nodes.keys())\n self.connect_backwards()", "def get_all_nodes(self):\n return self._get_all_nodes()", "def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1", "def list_nodes(self):\n return self.ironic_client.node.list()", "def mme_nodes(mme_base_url, token):\n nodes = []\n if not mme_base_url or not token:\n return nodes\n url = ''.join([mme_base_url, '/nodes'])\n nodes = matchmaker_request(url=url, token=token, method='GET')\n LOG.info('Matchmaker has the following connected nodes:{}'.format(nodes))\n return nodes", "def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )", "def createNodeVaccines(vaccinesList):\n vaccinesQuery = []\n for vaccineEl in vaccinesList:\n currentQuery = (\n \"CREATE (v:Vaccine {name: \\\"\" + str(vaccineEl[int(VaccineAttribute.NAME)]) + \"\\\" , producer: \\\"\" +\n str(vaccineEl[int(VaccineAttribute.PRODUCER)]) + \"\\\"}); \"\n )\n vaccinesQuery.append(currentQuery)\n return vaccinesQuery", "def select_all_nodes(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes\")\n \n rows = cur.fetchall()\n return rows", "def list_nodes(self):\n return self.datanodes.keys()", "def get_nodes(self):\n try:\n for node in Linode.search(status=Linode.STATUS_RUNNING):\n self.add_node(node)\n except chube_api.linode_api.ApiError, e:\n print \"Looks like Linode's API is down:\"\n print\n print e\n sys.exit(1)", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. 
if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def iter_nodes(self):", "def getNodes(self):\n nodes = [{\"address\": \"http://0.0.0.0:100\"}\n ,{\"address\": \"http://0.0.0.0:200\"}\n ,{\"address\": \"http://0.0.0.0:300\"}\n ,{\"address\": \"http://0.0.0.0:400\"}\n ,{\"address\": \"http://0.0.0.0:500\"}]\n return nodes", "def get_nodes(self):\n try:\n return list(self._adjacency_list.keys())\n except Exception as error:\n print(f'An error occurred: {error}')", "def get_nodes(self):\n self.get_status()\n old_api = self.version[0] <= '3'\n if old_api:\n certs_path = \"%s/certificate_statuses/*\" % (self.environment)\n nodeinfo_path_tpl = \"{env}/node/{node}\"\n else:\n certs_path = \"puppet-ca/v1/certificate_statuses/no_key?environment=%s\" % (self.environment)\n nodeinfo_path_tpl = \"puppet/v3/node/{node}?environment={env}\"\n\n csts = self._send('GET', certs_path)\n nodes_names = []\n for cst in csts:\n nodes_names.append(cst['name'])\n\n all_nodes = []\n for nname in nodes_names:\n path = nodeinfo_path_tpl.format(node=nname, env=self.environment)\n nodeinfo = self._send('GET', path)\n if old_api:\n nodeinfo = self._from_pson(nodeinfo['data'])\n else:\n nodeinfo = self._from_pson(nodeinfo)\n if 'parameters' in nodeinfo:\n node = nodeinfo['parameters']\n if self.onlynodes:\n if not (node.get('hostname') in self.onlynodes or\n node.get('ipaddress') in self.onlynodes or\n node.get('fqdn') in self.onlynodes or\n node.get('uuid') in self.onlynodes):\n continue\n all_nodes.append(node)\n\n return all_nodes", "def get_nodes(self):\n self.map_graph_id()\n self.nodes_list = [\n self.NX_GRAPHS[self.graph_id].nodes[idx]['label'] \n for idx in range(len(self.NX_GRAPHS[self.graph_id].nodes))]", "def nodes(self):\n return self._get_tree_queryset()", "def nodes(self):\n return self.__nodes", "def nodes (self):\n return self.__nodes", "def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes" ]
[ "0.6894898", "0.6524139", "0.6447016", "0.6415178", "0.6214899", "0.61830765", "0.611974", "0.6066176", "0.6058976", "0.6056742", "0.60356337", "0.5981315", "0.59691083", "0.593374", "0.59108174", "0.58802783", "0.5873629", "0.5873455", "0.58573043", "0.5813888", "0.5751181", "0.57397085", "0.57368875", "0.56870806", "0.56542844", "0.563899", "0.5631433", "0.5603073", "0.559674", "0.5593412" ]
0.75888234
0
Method that finds all the Test nodes in the database
def findAllTest(tx):
    query = (
        "MATCH (t:Test) "
        "RETURN t , ID(t);"
    )
    results = tx.run(query).data()
    return results
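The node finders above (findAllPerson, findAllHome, findAllLocation, findAllVaccine, findAllTest) differ only in the label and variable letter. A single parameterized helper could replace them; this is a sketch of mine, not part of the dataset, and because Cypher does not allow labels to be bound as query parameters, the label is validated against a whitelist before being interpolated.

ALLOWED_LABELS = {"Person", "House", "Location", "Vaccine", "Test"}  # assumed whitelist

def find_all_by_label(tx, label):
    # Labels cannot be passed as Cypher parameters, so validate before formatting.
    if label not in ALLOWED_LABELS:
        raise ValueError(f"unexpected label: {label}")
    query = f"MATCH (n:{label}) RETURN n , ID(n);"
    return tx.run(query).data()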
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def List(ctx):\n \"\"\"Note: This method is available only through the per-node API endpoint 5.0 or later.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n ListTestsResult = ctx.element.list_tests()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(ListTestsResult, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def findAllMakeTestRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:MAKE_TEST]->(n2:Test) \"\n \"RETURN ID(n1) , r , r.date , r.hour , r.result , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def test(self):\r\n # Establish connection and execute a query that returns\r\n # a table with the names of each table in the database.\r\n # Close the connection. Convert the table to a list and\r\n # return it.\r\n try:\r\n Connection = mariadb.connect(\r\n user = self.Name,\r\n host = self.Host,\r\n password= self.Password,\r\n port=3306)\r\n TestQuery = Connection.cursor()\r\n TestQuery.execute('USE moleculardata')\r\n TestQuery.execute('SHOW TABLES')\r\n TestQuery.close()\r\n Connection.close()\r\n return [j for sub in TestQuery for j in sub]\r\n # Exception to catch database errors. Exceptions could include:\r\n # problem connecting to the database or errors in the data query\r\n # request. 
\r\n # Returns an empty list.\r\n except mariadb.Error as e:\r\n print('Unable open connection {}.'.format(e))\r\n return[]", "def get_nodes():\n return conf.config.get_nodes(RELATIVE_PATH_FIXTURES_HOST)", "def test_get_nodes(self):\n wp22_rdf_graph = parse_rdf(WP22)\n wp706_rdf_graph = parse_rdf(WP706)\n wp1871_rdf_graph = parse_rdf(WP1871)\n wp2799_rdf_graph = parse_rdf(WP2799)\n\n nodes_wp22 = _get_nodes(wp22_rdf_graph)\n nodes_wp706 = _get_nodes(wp706_rdf_graph)\n nodes_wp1871 = _get_nodes(wp1871_rdf_graph)\n nodes_wp2799 = _get_nodes(wp2799_rdf_graph)\n\n self.assertEqual(len(nodes_wp22), 17)\n self.assertEqual(len(nodes_wp706), 186)\n self.assertEqual(len(nodes_wp1871), 115)\n self.assertEqual(len(nodes_wp2799), 141)", "def get_nodes(self):\n pass", "def test_get_hyperflex_node_list(self):\n pass", "def find_all(self):\n pass", "def getNodeTestsForPolicy(policyTestId):\n\n nodeTestsQuery = NodeTest.query.filter_by(_policy_test_id=policyTestId)\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def find_all(self):", "def test_get_related_nodes(self):\n pass", "def getNodes(self):\n return self.__allNodes", "def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()", "def findall_nodes(self):\n\n nodes = []\n for n in self.nodes:\n nodes += n.findall_forward()\n\n # Make sure list only contains every element once\n nodes = dict((k,1) for k in nodes)\n self.nodes = list(nodes.keys())\n self.connect_backwards()", "def get_all_nodes(self):\n return self._get_all_nodes()", "def list_nodes(self):\n\n return list(\n dict(\n self._from_json(self.manage.run(override=\"list-nodes\"))\n ).keys()\n )", "def get_nodes(self):\n try:\n for node in Linode.search(status=Linode.STATUS_RUNNING):\n self.add_node(node)\n except chube_api.linode_api.ApiError, e:\n print \"Looks like Linode's API is down:\"\n print\n print e\n sys.exit(1)", "def testGetAllPhEDExNodeNames(self):\n result = self.mySiteDB.getAllPhEDExNodeNames(excludeBuffer=True)\n self.assertFalse([pnn for pnn in result if pnn.endswith('_Buffer')])\n\n result = self.mySiteDB.getAllPhEDExNodeNames(excludeBuffer=False)\n self.assertTrue(len([pnn for pnn in result if pnn.endswith('_Buffer')]) > 5)\n\n result = self.mySiteDB.getAllPhEDExNodeNames(pattern='T1.*', excludeBuffer=True)\n self.assertFalse([pnn for pnn in result if not pnn.startswith('T1_')])\n self.assertTrue(len(result) > 10)\n\n result = self.mySiteDB.getAllPhEDExNodeNames(pattern='.*', excludeBuffer=True)\n self.assertTrue([pnn for pnn in result if pnn.startswith('T1_')])\n self.assertTrue([pnn for pnn in result if pnn.startswith('T2_')])\n self.assertTrue([pnn for pnn in result if pnn.startswith('T3_')])\n self.assertTrue(len(result) > 60)\n\n return", "def get_all_setups_nodes():\n ta_roots = get_all_setups_roots()\n ta_nodes = [TechAnim_Setup(x) for x in ta_roots]\n return ta_nodes", "def iter_nodes(self):", "def list_nodes(self):\n return self.ironic_client.node.list()", "def _DiscoverTests(root_dirs: List[Text],\n test_to_shards: Dict[Text, int]) -> List[_Test]:\n result = []\n for d in root_dirs:\n for root, _, files in os.walk(d):\n for f in files:\n if f.endswith(_TEST_FILENAME_SUFFIX):\n shards = test_to_shards.get(f, 1)\n for shard in range(0, shards):\n result.append(_Test(os.path.join(root, f), shard, shards))\n logging.info(\"Discovered %d tests\", len(result))\n return result", "def sequence(self):\n for tn in 
self._testnodes:\n yield tn", "def select_all_nodes(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Nodes\")\n \n rows = cur.fetchall()\n return rows", "def nodes(self):\n return self._get_tree_queryset()", "def list_nodes(self):\n return self.datanodes.keys()", "def test_tree(self):\n root = role_middleware.get_root()\n tree_list = role_middleware.get_tree(root.id)\n role_middleware.force_refresh()\n print(tree_list)", "def get_all_metadata(self):\n return self.db.get_all_nodes()", "def getTestData(self):\n raise NotImplementedError" ]
[ "0.77383196", "0.67398024", "0.63996077", "0.6377466", "0.6319616", "0.63012266", "0.62887144", "0.620618", "0.61972755", "0.6186909", "0.609754", "0.6096818", "0.6086778", "0.60831773", "0.6025748", "0.5991007", "0.5980183", "0.59563565", "0.59473395", "0.5940827", "0.59354633", "0.59155154", "0.59069103", "0.58964866", "0.589444", "0.5893154", "0.58927757", "0.5891711", "0.5885103", "0.5871211" ]
0.71007687
1
Method that finds all Live relationships in the database
def findAllLiveRelationships(tx):
    query = (
        "MATCH (n1:Person)-[r:LIVE]->(n2:House) "
        "RETURN ID(n1) , r , ID(n2);"
    )
    results = tx.run(query).data()
    return results
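As a small consumption sketch (the helper below, and the assumption that each returned dict is keyed by the literal RETURN expressions, are mine), the LIVE rows can be grouped into a person-per-house mapping:

def peopleByHouse(tx):
    # Groups person ids under the house they live in, reusing the finder above.
    occupants = {}
    for row in findAllLiveRelationships(tx):
        occupants.setdefault(row["ID(n2)"], []).append(row["ID(n1)"])
    return occupants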
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_all_associations(self):\n return", "def _get_live_entries(self):\n from article.models import Entry\n return self.entry_set.filter(status__exact=Entry.LIVE_STATUS)", "def get_all():\n return SavedQuery.get_all()", "def get_all(self):\n return self.db", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def findAllAppContactRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.hour, ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def retrive(self):\n already_seen_videos = set()\n # create a generator function that spits out video objects one at a time\n for each_query in self.db_query_stack:\n results_of_query = DB.find(each_query)\n print('results_of_query = ', type(results_of_query))\n # this only cares about the keys (video id's)\n unseen_videos = set(results_of_query) - already_seen_videos \n for each_video_id in unseen_videos:\n # output full objects\n yield DatabaseVideo(each_video_id)\n # all the unseen have now been seen\n already_seen_videos |= unseen_videos", "def all(self):\n print('HELLO')\n return self.__model__.query.all()", "def iter_all(self):\n return self.opportunities.find()", "def findAllInfectedRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.name , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def findAll(tx):\n query = (\n \"MATCH (n1)-[r]->(n2) \"\n \"RETURN n1 AS node1 , r AS relationship , n2 AS node2 \"\n )\n\n result = tx.run(query)\n return [(record[\"node1\"], record[\"relationship\"], record[\"node2\"]) for record in result]", "def get_activities(cls):\n objs = cls.objects\n return objs", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def GetObjects(self): \r\n return self.model.GetObjects()", "def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()", "def fetch_all(cls):\n return cls.query.all()", "def get_all_saved():\n return saved.find()", "def resolve_relations(self):\n\n log.debug(\"Start resolving relations\")\n for object_type in NetBoxObject.__subclasses__():\n\n for this_object in self.get_all_items(object_type):\n\n this_object.resolve_relations()\n\n log.debug(\"Finished resolving relations\")", "def all(self):\n\n return self.__model__.query.all()", "def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = 
filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node", "def get_all():\n return PushManager.query.all()", "def relations(self):\n return set(self.triples()[\"relation\"])", "def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects", "def list(self):\n return self.objects.all()", "def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)" ]
[ "0.6515331", "0.60994786", "0.6052908", "0.5807468", "0.5803312", "0.5796571", "0.56822133", "0.5633071", "0.5627949", "0.5613805", "0.5611842", "0.5598031", "0.55815506", "0.5552337", "0.5540224", "0.5523782", "0.5501259", "0.5499945", "0.54917777", "0.54587847", "0.54516184", "0.5450555", "0.5445944", "0.5444191", "0.54378825", "0.54317236", "0.54089105", "0.5404805", "0.5393137", "0.5393137" ]
0.7866796
0
Method that finds all App_Contact relationships in the database
def findAllAppContactRelationships(tx):
    query = (
        "MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) "
        "RETURN ID(n1) , r , r.date , r.hour, ID(n2);"
    )
    results = tx.run(query).data()
    return results
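Because this query already projects r.date and r.hour, callers can filter contacts client-side. A short illustrative helper follows (key names assumed to mirror the RETURN expressions; the date-equality filter is only an example):

def appContactsOn(tx, date):
    # Keep only APP_CONTACT rows whose recorded date matches the given one.
    rows = findAllAppContactRelationships(tx)
    return [
        (row["ID(n1)"], row["ID(n2)"], row["r.hour"])
        for row in rows
        if row["r.date"] == date
    ]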
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_contacts(self):\n self.init_db(self._testing)\n\n query = \"SELECT {} FROM {} ORDER BY id;\".format(\", \".join(Contact.columns_with_uid), Contact.table_name)\n\n data = self.db.conn.execute(query)\n\n return [Contact(*item) for item in data]", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def getallcontacts(self):\n feed_url = self.contacts_client.GetFeedUri(projection='full')\n total_read = 0\n while True:\n print('Retrieving contacts... (%d retrieved so far)' % total_read)\n feed = self.contacts_client.get_feed(uri=feed_url,\n auth_token=None,\n desired_class=gdata.contacts.data.ContactsFeed)\n total_read += len(feed.entry)\n for entry in feed.entry:\n yield entry\n next_link = feed.GetNextLink()\n if next_link is None:\n print('All contacts retrieved: %d total' % total_read)\n break\n feed_url = next_link.href", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def list_contacts(self):\n return self.contacts", "def contacts(self):\r\n return contacts.Contacts(self)", "def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts", "def contacts(self, gdi_oids, session):\n contacts = []\n\n ResourceContact = self.config_models.model('resource_contact')\n Contact = self.config_models.model('contact')\n query = session.query(ResourceContact) \\\n .filter(ResourceContact.gdi_oid_resource.in_(gdi_oids)) \\\n .order_by(ResourceContact.id_contact_role)\n # eager load relations\n query = query.options(\n joinedload(ResourceContact.contact)\n .joinedload(Contact.organisation)\n )\n for res_contact in query.all():\n person = res_contact.contact\n person_data = {\n 'id': person.id,\n 'name': person.name,\n 'function': person.function,\n 'email': person.email,\n 'phone': person.phone,\n 'street': person.street,\n 'house_no': person.house_no,\n 'zip': person.zip,\n 'city': person.city,\n 'country_code': person.country_code\n }\n\n organisation_data = None\n organisation = person.organisation\n if organisation is not None:\n organisation_data = {\n 'id': organisation.id,\n 'name': organisation.name,\n 'unit': organisation.unit,\n 'abbreviation': organisation.abbreviation,\n 'street': organisation.street,\n 'house_no': organisation.house_no,\n 'zip': organisation.zip,\n 'city': organisation.city,\n 'country_code': organisation.country_code\n }\n\n contacts.append({\n 'person': person_data,\n 'organisation': organisation_data\n })\n\n return contacts", "def fetch_contacts(owner_account_id):\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?$select=id,identifier\")\n contacts = resp.json()\n return contacts", 
"def contacts(self):\n return ContactCollection(self.request)", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def get_all_associations(self):\n return", "def get_queryset(self):\n return self.request.user.contacts.all()", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def get_all_companies_and_people():", "def relationships(self):", "async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from contact_group_contacts, contacts\n where contact_group_contacts.contact_group_id = %s\n and contact_group_contacts.contact_id = contacts.id\"\"\"\n return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (contact_group_id,))]", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n return contact_data", "def get_all_contacts(self,\n hook,\n resource,\n data=None,\n headers=None,\n extra_options=None):\n all_pages = []\n total_contacts = -1\n next_token = None\n\n while len(all_pages) != total_contacts:\n if not next_token:\n result = hook.run('{}/contacts'.format(resource),\n data,\n headers,\n extra_options).json()\n else:\n result = hook.run('{}/contacts/{}'.format(resource, next_token),\n data,\n headers,\n extra_options).json()\n\n all_pages += result.get('contacts', None)\n\n total_contacts = result.get('total_contacts', None)\n\n if 'bookmark' in result:\n next_token = result.get('bookmark', None)\n\n return all_pages", "def contact_list(self):\n return self._contact_list", "def get_queryset(self):\n user = self.request.user\n return Contact.objects.filter(owner=user)", "def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def related_contacts(self):\n if \"relatedContacts\" in self._prop_dict:\n return 
RelatedContactsCollectionPage(self._prop_dict[\"relatedContacts\"])\n else:\n return None", "def get_contacts():\n return jsonify(g.driver.get_contacts())", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def contacts(self):\n if \"contacts\" in self._prop_dict:\n return ContactsCollectionPage(self._prop_dict[\"contacts\"])\n else:\n return None", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)" ]
[ "0.70344734", "0.66346985", "0.6478732", "0.6478041", "0.63141716", "0.62945706", "0.62855613", "0.62742424", "0.62333655", "0.6192098", "0.6136291", "0.61238366", "0.6116576", "0.6060181", "0.60537875", "0.60507864", "0.6029192", "0.60062426", "0.5840281", "0.58388346", "0.578212", "0.5663287", "0.5647111", "0.5635784", "0.5607297", "0.55764574", "0.5539419", "0.5497792", "0.5486107", "0.54652095" ]
0.75159436
0
Method that finds all VISIT relationships in the database
def findAllVisitRelationships(tx):
    query = (
        "MATCH (n1:Person)-[r:VISIT]->(n2:Location) "
        "RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def findAllLiveRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:LIVE]->(n2:House) \"\n \"RETURN ID(n1) , r , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def relationship_views(self) -> Iterable[RelationshipView]:\n return set(self._relationship_views)", "def navigations(self):\n return Navigation.objects.filter(page=self)", "def findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def findAllInfectedRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.name , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def in_collections(self):\n links = []\n for link in self.link:\n if link.rel == PARENT_LINK_REL and link.href:\n links.append(link)\n return links", "def get_visits(visit_container):\r\n return visit_container.visits.all()", "def relations(self):\n return set(self.triples()[\"relation\"])", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n pass", "def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }", "def related_entities(self):\n related_entities = []\n for point in self.accesspoint_set.all():\n related_entities.append({\n 'name': str(point),\n 'archive_url': point.archive_url,\n 'page_number': point.trigger,\n 'accessed_on': point.accessed_on,\n 'url': reverse_lazy(\n 'update-access-point',\n kwargs={'source_id': self.uuid, 'pk': point.uuid}\n )\n })\n return related_entities", "def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! 
IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited", "def getReachableViews(self):\n return [self]", "def get_all_associations(self):\n return", "def _analyze_relationships(self):\n self._child_map = defaultdict(set)\n self._parent_map = defaultdict(set)\n\n for table, table_meta in self._metadata['tables'].items():\n if table_meta.get('use', True):\n for field_meta in table_meta['fields'].values():\n ref = field_meta.get('ref')\n if ref:\n parent = ref['table']\n self._child_map[parent].add(table)\n self._parent_map[table].add(parent)", "def get_all_social_paths(self, user_id):\n if len(self.friendships) > 0:\n visited = {}\n q = Queue()\n q.enqueue([user_id])\n\n while q.size() > 0:\n curr_path = q.dequeue()\n curr_vertex = curr_path[-1]\n\n if curr_vertex not in visited:\n visited[curr_vertex] = curr_path\n\n for friend in self.friendships[curr_vertex]:\n path_copy = curr_path[:]\n path_copy.append(friend)\n q.enqueue(path_copy)\n\n return visited\n\n else:\n print(\"There are currently no friendship paths in the network\")", "def relations(cls):\n return [c.key for c in cls.__mapper__.iterate_properties\n if isinstance(c, RelationshipProperty)]", "def getAllSocialPaths(self, userID):\n visited = {} # Note that this is a dictionary, not a set\n print(f\"user ID {userID}\")\n\n for i in range(1, len(self.users)):\n visited[i] = self.bfs(userID, i)\n\n return visited", "def links(self):\n\n links = []\n for foreign_key in self.__table__.foreign_keys:\n column = foreign_key.column.name\n column_value = getattr(self, column, None)\n if column_value:\n table = foreign_key.column.table.name\n with app.app_context():\n endpoint = current_app.class_references[table]\n links.append({'rel': 'related', 'uri': '/{}/{}'.format(\n endpoint.__name__, column_value)})\n links.append({'rel': 'self', 'uri': self.resource_uri()})\n return links", "def findAll(tx):\n query = (\n \"MATCH (n1)-[r]->(n2) \"\n \"RETURN n1 AS node1 , r AS relationship , n2 AS node2 \"\n )\n\n result = tx.run(query)\n return [(record[\"node1\"], record[\"relationship\"], record[\"node2\"]) for record in result]", "def references(self):\n return self._get_related_resources(False)", "def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': 
f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node", "def get_all_relations(graph, u, v):\n return {\n data[RELATION]\n for data in graph.edge[u][v].values()\n }", "def neighbors(self):\n return self.graph.neighbors(self.id)", "def gather_entities(self):\n entitylist = set()\n for entity in self.entities.all():\n entitylist.add(entity)\n entitylist.update(entity.get_ancestors())\n return entitylist #set(entity for entity in entitylist if not entity.abstract_entity)", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def get_diagnose(visit):\r\n return visit.diagnose.all()", "def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n #rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in 
self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes", "def _get_connections(self) -> _ConnectionsMap:\n seen: Dict[int, Any] = {}\n for parent in self.target.ancestors:\n if not isinstance(parent, NodeInstance):\n continue\n if parent is self.target.root:\n break\n if self.operation_host:\n self._get_connection(self.operation_host, parent, seen)\n self._get_connection(self.target.root, parent, seen)\n # get the rest of the default connections\n self._get_connection(self.target.root, None, seen)\n\n # reverse so nearest relationships replace less specific ones that have matching names\n connections = _ConnectionsMap( # the list() is for Python 3.7\n (rel.name, rel) for rel in reversed(list(seen.values()))\n )\n return connections" ]
[ "0.670825", "0.6381381", "0.62247133", "0.5903214", "0.58438534", "0.57981044", "0.5787576", "0.5742017", "0.5724101", "0.57024485", "0.5691188", "0.5671603", "0.56212044", "0.56135577", "0.56042075", "0.55915403", "0.5561383", "0.5561318", "0.55501425", "0.5537901", "0.5510554", "0.54817224", "0.5477578", "0.5442459", "0.5442385", "0.54378104", "0.543471", "0.5409552", "0.53957015", "0.53775305" ]
0.7115678
0
Method that finds all GET (a vaccine) relationships in the database
def findAllGetVaccineRelationships(tx):
    query = (
        "MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) "
        "RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findAllVaccine(tx):\n query = (\n \"MATCH (v:Vaccine) \"\n \"RETURN v , ID(v);\"\n )\n results = tx.run(query).data()\n return results", "def relationships(self):", "def findAllLiveRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:LIVE]->(n2:House) \"\n \"RETURN ID(n1) , r , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_all_restaurants():\n return list(Restaurant.objects.all().values())", "def get_all_associations(self):\n return", "def get(self):\r\n return get_all()", "def get_all(self):\n url = self._dbname + '/_all'\n return self._connection.get(url).json()", "def assoc_list():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n reqdata = request.json\n if not check_token(reqdata[\"token\"]):\n return jsonify(status=TOKEN_ERROR)\n users_paths = db.session.query(UserPathAssociation).all()\n resdata = []\n for e in users_paths:\n resdata.append({\"id\" : e.id, \"user_id\":e.user_id, \"path_id\":e.path_id})\n return jsonify(data=resdata, status=OK_STATUS)", "def get_all(self):\n return self.__fetcher.get_fetched()", "def get_all_by_id_vaga(id_vaga):\n return Candidatura.query.filter_by(\n id_vaga=id_vaga\n ).all()", "def get_all():\n return SavedQuery.get_all()", "def opt_get_all_models_rest_api():\n return retrieve_all_models()", "def fetch_all(cls):\n return cls.query.all()", "def get_all_cars(self):\n\n all_cars = TheCar.objects.all()\n\n return all_cars", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def get_all(self):\n return {\"parcels\": self.db}, 200", "def findAll(tx):\n query = (\n \"MATCH (n1)-[r]->(n2) \"\n \"RETURN n1 AS node1 , r AS relationship , n2 AS node2 \"\n )\n\n result = tx.run(query)\n return [(record[\"node1\"], record[\"relationship\"], record[\"node2\"]) for record in result]", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def getall():\n elements = Advertisements().get_all_elements()\n data = jsonify(elements)\n data.statut_code = 200\n return data", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def get_cursos(request):\n if request.method == 'GET':\n cursos = Curso.nodes.all()\n cursos_list = []\n for i in range(0, len(cursos)):\n cursos_list.append(cursos[i].__dict__[\"nombre\"])\n return JsonResponse({\"cursos\": cursos_list})", "def all(self):\n print('HELLO')\n return self.__model__.query.all()", "def findAllInfectedRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.name , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_all(self):\n result_get = GetRest(function = self.function).performRequest()\n return result_get", "def get_many(self, request, **kwargs):\n return []", "def getData(graph, request):\r\n results = list(graph.query(request))\r\n return results", "def findAllAppContactRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.hour, ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_all_accounts():\n accounts = 
Account.query.all()\n print(accounts)\n return \"\"", "def list(self):\n return self.objects.all()" ]
[ "0.6645363", "0.6325363", "0.6189448", "0.60453117", "0.6004555", "0.60038614", "0.5948619", "0.58877844", "0.58489853", "0.58127844", "0.5807687", "0.5766347", "0.57608616", "0.57361", "0.571654", "0.5715433", "0.5715096", "0.56999284", "0.5696747", "0.5652339", "0.56353635", "0.56309074", "0.56223464", "0.5609806", "0.5593957", "0.5575863", "0.55744934", "0.5568851", "0.5559752", "0.5558515" ]
0.7728573
0
Method that finds all MAKE (a test) relationships in the database
def findAllMakeTestRelationships(tx):
    query = (
        "MATCH (n1:Person)-[r:MAKE_TEST]->(n2:Test) "
        "RETURN ID(n1) , r , r.date , r.hour , r.result , ID(n2);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def test_get_relationship_templates(self):\n pass", "def test_find_relation_types(self):\n pass", "def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def test_child_relationships(self, init_db, favorite1):\n\n favorite = Favorite.get(id=favorite1.id)\n\n with raises(NotImplementedError) as error:\n favorite.get_child_relationships()\n \n assert str(error.value) == \"The get_relationships method must be overridden in all child model classes\"", "def test_getCpfRelations(self):\n pass", "def findAllTest(tx):\n query = (\n \"MATCH (t:Test) \"\n \"RETURN t , ID(t);\"\n )\n results = tx.run(query).data()\n return results", "def get_concept_list():\n\n DummyConcept.objects.get_or_create(name='Concept A')\n DummyConcept.objects.get_or_create(name='Concept B')\n DummyConcept.objects.get_or_create(name='Concept C')\n DummyConcept.objects.get_or_create(name='Concept D')\n return DummyConcept.objects.all()", "def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []", "def findAll(tx):\n query = (\n \"MATCH (n1)-[r]->(n2) \"\n \"RETURN n1 AS node1 , r AS relationship , n2 AS node2 \"\n )\n\n result = tx.run(query)\n return [(record[\"node1\"], record[\"relationship\"], record[\"node2\"]) for record in result]", "def find_all(self):\n pass", "def get_all_makes(self):\n return self.get('vehicles/GetAllMakes')", "def get_all_associations(self):\n return", "def find_all(self):", "def findAllLiveRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:LIVE]->(n2:House) \"\n \"RETURN ID(n1) , r , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def test_getResourceRelations(self):\n pass", "def test_get_all_related(self):\n c1 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c1\")\n c2 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c2\")\n # if c1 is related to c2\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c2\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c1)\n self.assertEqual(set(expected_output), set(actual_output))\n # then c2 should be related to c1\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c1\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c2)\n 
self.assertEqual(set(expected_output), set(actual_output))", "def test_get_related_nodes(self):\n pass", "def getAllRecipes(cls):\n\n rec = db.session.query(Recipe).order_by(Recipe.cat_code).all() \n\n return rec", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()", "def getQuery(self, context, obj):\n return TestModel.all()", "def test_intent_classifier_get_details_all(self):\n pass", "def test_child_relationships(self, init_db, audit):\n audit = Audit.query.first()\n assert audit.get_child_relationships() is None", "def produce_all_database(is_debug):\n\tproduce_database([\"apnea-ecg\", \"train\"], is_debug)\n\tproduce_database([\"apnea-ecg\", \"test\"], is_debug)", "def consultar_todos_DB(self):\n registros = db.session.query(ModelConcurso).all()\n for registro in registros:\n print(registro)", "def get_all() -> list:\n categorias = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM categorias\")\n for row in cursor:\n categoria = Categoria(row[1], row[0])\n categorias.append(categoria)\n if debug:\n print(str(categoria))\n\n conn.close()\n return categorias", "def test_get_all_prerequisites(self):\n c1 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c1\")\n root = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"root\")\n # if root is the prerequisite of c1\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"root\"])\n actual_output = api.get_all_prerequisites(channel_id=self.the_channel_id, content=c1)\n self.assertEqual(set(expected_output), set(actual_output))\n # then c1 should not be the prerequisite of root\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c1\"])\n actual_output = api.get_all_prerequisites(channel_id=self.the_channel_id, content=root)\n self.assertNotEqual(set(actual_output), set(expected_output))", "def test_child_relationships(self, init_db, category_with_favorites):\n\n category = Category.get(id=category_with_favorites.id)\n assert category.get_child_relationships() is not None\n assert len(category.favorites.all()) > 0" ]
[ "0.6311003", "0.59542066", "0.5882875", "0.56991184", "0.56488883", "0.56313014", "0.56308293", "0.56160855", "0.555243", "0.5523456", "0.55009925", "0.533547", "0.5330354", "0.5322995", "0.52983755", "0.5294025", "0.529047", "0.52874774", "0.5286571", "0.5281897", "0.5259566", "0.5248126", "0.52441525", "0.522842", "0.52147037", "0.5196952", "0.5181627", "0.51780546", "0.5176058", "0.5159895" ]
0.740138
0
Method that finds all INFECTED relationships in the database
def findAllInfectedRelationships(tx):
    query = (
        "MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) "
        "RETURN ID(n1) , r , r.date , r.name , ID(n2);"
    )
    results = tx.run(query).data()
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def get_all_associations(self):\n return", "def _all_edges(node: FMVGraphNode) -> Set[FMVGraphEdge]:\n rval = set([e for e in node.edges\n if e.predicate not in skip_fhir_predicates and e.type_node.node not in skip_fhir_types])\n for p in node.parents:\n if p.node not in skip_fhir_types:\n rval.update(FHIROntologyTable._all_edges(p))\n return rval", "def gather_entities(self):\n entitylist = set()\n for entity in self.entities.all():\n entitylist.add(entity)\n entitylist.update(entity.get_ancestors())\n return entitylist #set(entity for entity in entitylist if not entity.abstract_entity)", "def get_all_incidents(self):\n sql = f\"SELECT * FROM incidences\"\n curr = Db().cur\n curr.execute(sql)\n output = curr.fetchall()\n return output", "def _relation_check(self):\n seen = set()\n for entity in self.get_entities():\n for field in entity.fields.itervalues():\n if field.is_relation():\n seen.add(field.remote_name)\n missing = seen - set(self.entities.keys())\n if missing:\n raise exceptions.SchemaError(\n 'undefined entities referenced in relations: %s' % (\n ', '.join(missing)))", "def findAllLiveRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:LIVE]->(n2:House) \"\n \"RETURN ID(n1) , r , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def fk_associations(cls):\n return cls._fk_associations", "def findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def relations(self):\n return set(self.triples()[\"relation\"])", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def _get_invariom_list(self):\n self.invariom_list = []\n for molecule in self.values():\n for atom in molecule.atoms:\n for invariom in atom.invarioms:\n if not invariom in self.invariom_list:\n self.invariom_list.append(invariom)", "def _analyze_relationships(self):\n self._child_map = defaultdict(set)\n self._parent_map = defaultdict(set)\n\n for table, table_meta in self._metadata['tables'].items():\n if table_meta.get('use', True):\n for field_meta in table_meta['fields'].values():\n ref = field_meta.get('ref')\n if ref:\n parent = ref['table']\n self._child_map[parent].add(table)\n self._parent_map[table].add(parent)", "def edges_without_adjacencies(self):\n edges = dict(self.eligible_edges_with_indexes)\n for adj in self.adjacencies.values():\n for edge_info in adj:\n if edge_info.self_edge_index in edges:\n edges[edge_info.self_edge_index] = None\n return list(filter(lambda x: x is not None, edges.values()))", "def relations(cls):\n return [c.key for c in cls.__mapper__.iterate_properties\n if isinstance(c, RelationshipProperty)]", "def iter_recursive_objects(self):\n from noc.inv.models.interface import Interface\n\n for i in Interface.objects.filter(managed_object=self.id):\n yield i", "def get_foreign_keys(self):\n query = mssqlqueries.get_foreignkeys()\n logger.info(u'Foreign keys query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield ForeignKey(*row)", "def findAllAppContactRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.hour, ID(n2);\"\n )\n results = tx.run(query).data()\n return 
results", "def test_NotIncluded(self):\n for ind in self.not_included:\n db_ind = Individual.query.filter_by(name=ind).all()\n self.assertEqual(db_ind, [])", "def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def get_edges(self):\n return \\\n set({\n edge\n for node in self.nodeset\n for edge in node.get_incident_edges()\n })", "def get_all_exclusives(self):\r\n if self.exclusives is None:\r\n self._propagate_exclusives()\r\n return self.exclusives", "def iter_all(self):\n return self.opportunities.find()", "def iteridents(self):\n raise NotImplementedError", "def eligible_edges(self):\n return self.edges", "def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 
'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node", "def test_child_relationships(self, init_db, audit):\n audit = Audit.query.first()\n assert audit.get_child_relationships() is None", "def prepare_related_incidents(self, object):\n roles = ActorRole.objects.filter(\n actor=object.id).filter(incident__isnull=False)\n\n related_incidents = [\n '/api/v1/incident/{0}/'.format(b.id)\n for ar in roles\n for b in ar.incident_set.all()\n ]\n\n return related_incidents" ]
[ "0.6335131", "0.6312678", "0.5775378", "0.57695436", "0.5733898", "0.57229775", "0.57219905", "0.5700881", "0.5683474", "0.5677579", "0.5643423", "0.5604486", "0.5525296", "0.55013794", "0.5481556", "0.544633", "0.5422407", "0.5419492", "0.5403143", "0.5386868", "0.53759784", "0.53099376", "0.53079164", "0.52996993", "0.52841276", "0.5263138", "0.525919", "0.52575415", "0.5252139", "0.52459407" ]
0.66463846
0
Method that creates the query for the creation of the vaccines node
def createNodeVaccines(vaccinesList):
    vaccinesQuery = []
    for vaccineEl in vaccinesList:
        currentQuery = (
            "CREATE (v:Vaccine {name: \"" + str(vaccineEl[int(VaccineAttribute.NAME)]) + "\" , producer: \"" + str(vaccineEl[int(VaccineAttribute.PRODUCER)]) + "\"}); "
        )
        vaccinesQuery.append(currentQuery)
    return vaccinesQuery
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createGettingVaccine(tx, query, personId, vaccineId, date, country, expDate):\n tx.run(query, personId=personId, vaccineId=vaccineId, date=date, country=country, expDate=expDate)", "def make_query(self):", "def createRelationshipsGetVaccine(d, pIds, vIds):\n # Choose how many new visit relationships\n numberOfVaccines = MAX_NUMBER_OF_VACCINE\n\n for _ in range(0, numberOfVaccines):\n vIndex = randint(0, len(vIds) - 1)\n vaccineId = vIds[vIndex]\n pIndex = randint(0, len(pIds) - 1)\n personId = pIds[pIndex]\n date = datetime.date.today() - datetime.timedelta(days=randint(0, VACCINES_DAYS_BACKS))\n country = \"Italy\"\n # For the future: maybe do a random country\n # Ask to neo4j server how many vaccines the user did\n query = (\n \"MATCH (p:Person)-[r]->(v:Vaccine) \"\n \"WHERE ID(p) = $personId AND type(r)='GET_VACCINE'\"\n \"RETURN count(p) as count,ID(v) as vaccineID,r.expirationDate as date\"\n )\n with d.session() as s:\n datas = s.read_transaction(gettingNumberVaccines, query, personId)\n\n # if no vaccines do one, else make the second vaccine\n if len(datas) == 0:\n string2 = str(date + datetime.timedelta(days=28)).split(\"-\")\n expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))\n\n else:\n if len(datas) == 1:\n string1 = str(datas[0][\"date\"]).split(\"-\")\n date = datetime.date(int(string1[0]), int(string1[1]), int(string1[2]))\n string2 = str(date + datetime.timedelta(days=365)).split(\"-\")\n expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))\n\n vaccineId = datas[0][\"vaccineID\"]\n else:\n continue\n date = date.strftime(\"%Y-%m-%d\")\n expDate = expDate.strftime(\"%Y-%m-%d\")\n\n query = (\n \"MATCH (p:Person) , (v:Vaccine) \"\n \"WHERE ID(p) = $personId AND ID(v) = $vaccineId \"\n \"MERGE (p)-[:GET_VACCINE{date:date($date),country:$country,expirationDate:date($expDate)}]->(v); \"\n )\n\n # Execute the query\n with d.session() as s:\n s.write_transaction(createGettingVaccine, query, personId, vaccineId, date, country, expDate)", "def generate_query(self):\n return", "def get_query():\n return CiscoVlanIftableRelationshipQuery", "def cypher_create():\n graph.cypher.execute(\"CREATE (a:Person {name:{N}})\", {\"N\": \"yangyy\"})", "def query(self, query):", "def CreateQueryTransaction(self, dest):\n c = Query(dest, self.node_id)\n self.connections.append((\"QUERY\", c))\n return c", "def init_query(snmp_object):\n return CiscoVlanIftableRelationshipQuery(snmp_object)", "def findAllVaccine(tx):\n query = (\n \"MATCH (v:Vaccine) \"\n \"RETURN v , ID(v);\"\n )\n results = tx.run(query).data()\n return results", "def _make_query(self):\r\n raise NotImplementedError()", "def buildQueryVector(self, termList):\n\t\tquery = self.createVector(\" \".join(termList))\n\t\treturn query", "def query(self):", "def _create_new_query(request, template):\n # from the template, we get the version manager\n template_version_manager = template_version_manager_api.get_by_version_id(str(template.id))\n # from the version manager, we get all the version\n template_ids = template_api.get_all_by_id_list(template_version_manager.versions)\n # create query\n query = create_default_query(request, template_ids)\n # then upsert\n return query_api.upsert(query)", "def findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def junos_cve_query(version):\n pass", "def 
visit_query(self, query):\n return query", "def calcQueryVector(self):\n query = input(\"Query: \");\n ana = StemmingAnalyzer() ### lowercases, stems, ignores stopwords\n tokens = [token.text for token in ana(query)]\n\n queryVector = {}\n for token in tokens:\n if token in self.invertedIndex.keys():\n if token in queryVector.keys():\n queryVector[token]+=1;\n else:\n queryVector[token] = 1;\n\n return self.normalizeQueryVector(queryVector);", "def query3() :", "def create_vrf(self, parent_dn, vrf_name):\n Ctx_mo = Ctx(parent_dn, vrf_name)\n self.commit(Ctx_mo)\n return Ctx_mo", "def __init__(self, orKeywords=True, baseurl=None, version=\"1.0\"):\n if not baseurl: baseurl = RegistryService._STSCI_REGISTRY_BASEURL\n dalquery.DALQuery.__init__(self, baseurl, \"vaoreg\", version)\n self._kw = [] # list of individual keyword phrases\n self._preds = [] # list of SQL predicates\n self._svctype = None\n self._band = None\n self._orKw = orKeywords\n self._doSort = True\n self._dalonly = False", "def query(self):\n pass", "def create(self, query, callback=None, query_args=None):\r\n data = self.db.execute(query, query_args)\r\n return data", "def _gen_cat_query(self,query_fields=None):\n if query_fields is None:\n object_id_fields = ['decals_id','brick_primary','brickid','ra','dec','gaia_pointsource']\n mag_fields = ['mag_g','mag_r','mag_z','mag_w1','mag_w2','mag_w3','mag_w4']\n snr_fields = ['snr_g','snr_r','snr_z','snr_w1','snr_w2','snr_w3','snr_w4']\n query_fields = object_id_fields+mag_fields+snr_fields\n \n database = \"ls_dr7.tractor\"\n self.query = dlsurvey._default_query_str(query_fields, database, self.coord, self.radius)", "def createVisit(tx, query, personId, locationId, date, startHour, endHour):\n tx.run(query, personId=personId, locationId=locationId, date=date, startHour=startHour,\n endHour=endHour)", "def gettingNumberVaccines(tx, query, personId):\n return tx.run(query, personId=personId).data()", "def createNodeTests(testsList):\n testsQuery = []\n for testEl in testsList:\n currentQuery = (\n \"CREATE (t:Test {name: \\\"\" + str(testEl) + \"\\\"}); \"\n )\n testsQuery.append(currentQuery)\n return testsQuery", "def run_cypher_query(self, query):\n with self._driver.session() as session:\n session.write_transaction(self.add_input_graph, query)", "def create_schema(client):\n base = WOQLQuery().doctype(\"EphemeralEntity\").label(\"Ephemeral Entity\").description(\"An entity that has a lifespan\")\n base.property(\"lifespan_start\", \"dateTime\").label(\"Existed From\")\n base.property(\"lifespan_end\", \"dateTime\").label(\"Existed To\")\n \n country = WOQLQuery().add_class(\"Country\").label(\"Country\").description(\"A nation state\").parent(\"EphemeralEntity\")\n country.property(\"iso_code\", \"string\").label(\"ISO Code\")\n country.property(\"fip_code\", \"string\").label(\"FIP Code\") \n\n airline = WOQLQuery().add_class(\"Airline\").label(\"Airline\").description(\"An operator of airplane flights\").parent(\"EphemeralEntity\")\n airline.property(\"registered_in\", \"Country\").label(\"Registered In\"),\n \n airport = WOQLQuery().add_class(\"Airport\").label(\"Airport\").description(\"An airport where flights terminate\").parent(\"EphemeralEntity\")\n airport.property(\"situated_in\", \"Country\").label(\"Situated In\"),\n \n flight = WOQLQuery().add_class(\"Flight\").label(\"Flight\").description(\"A flight between airports\").parent(\"EphemeralEntity\")\n flight.property(\"departs\", \"Airport\").label(\"Departs\")\n flight.property(\"arrives\", 
\"Airport\").label(\"Arrives\")\n flight .property(\"operated_by\", \"Airline\").label(\"Operated By\") \n\n schema = WOQLQuery().when(True).woql_and(base, country, airline, airport, flight)\n return schema.execute(client)", "def vrules(self):\n ..." ]
[ "0.7164774", "0.5819601", "0.57299334", "0.56802905", "0.54112285", "0.5248462", "0.5180487", "0.51707035", "0.51623213", "0.51599586", "0.5150354", "0.5147532", "0.5021065", "0.49900708", "0.49644697", "0.49570605", "0.4934976", "0.49241465", "0.48584074", "0.478696", "0.47686562", "0.47405937", "0.47258765", "0.47243983", "0.47243285", "0.47152805", "0.4707631", "0.4694497", "0.46880308", "0.4678967" ]
0.76611483
0
Method that creates VISIT relationships
def createRelationshipsVisit(d, pIds, lIds):
    # Choose how many new visit relationships
    numberOfVisits = MAX_NUMBER_OF_VISIT

    for _ in range(0, numberOfVisits):
        lIndex = randint(0, len(lIds) - 1)
        locationId = lIds[lIndex]
        pIndex = randint(0, len(pIds) - 1)
        personId = pIds[pIndex]
        # Choose the hour/date
        date = datetime.date.today() - datetime.timedelta(days=randint(0, VISITS_DAYS_BACKS))
        date = date.strftime("%Y-%m-%d")
        h = randint(0, 22)
        minutes = randint(0, 59)
        if minutes < 10:
            minutes = "0" + str(minutes)
        startHour = str(h) + ":" + str(minutes)
        h = randint(h, 23)
        minutes = randint(0, 59)
        if minutes < 10:
            minutes = "0" + str(minutes)
        endHour = str(h) + ":" + str(minutes)
        n = 0
        while not validateDate(d, date, personId, endHour) and n < MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:
            date = datetime.date.today() - datetime.timedelta(days=randint(0, 150))
            date = date.strftime("%Y-%m-%d")
            h = randint(0, 22)
            minutes = randint(0, 59)
            if minutes < 10:
                minutes = "0" + str(minutes)
            startHour = str(h) + ":" + str(minutes)
            h = randint(h, 23)
            minutes = randint(0, 59)
            if minutes < 10:
                minutes = "0" + str(minutes)
            endHour = str(h) + ":" + str(minutes)
            n = n + 1
        if n == MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:
            continue
        query = (
            "MATCH (p:Person) , (l:Location) "
            "WHERE ID(p) = $personId AND ID(l) = $locationId "
            "MERGE (p)-[:VISIT {date: date($date) , start_hour: time($startHour) , end_hour: time($endHour)}]->(l); "
        )
        # Execute the query
        with d.session() as s:
            s.write_transaction(createVisit, query, personId, locationId, date, startHour, endHour)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def createRelationshipsInfect(id, test_date, test_hour, daysBack):\n familyQuery = (\n \"MATCH (pp:Person)-[:LIVE]->(h:House)<-[:LIVE]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND NOT (ip)<-[:COVID_EXPOSURE]-(pp)\"\n \"RETURN DISTINCT ID(ip);\"\n )\n\n \"\"\"\n IMPORTANT: ($date) represents the date from which we check the contacts. It is the date of positive test - 7 days\n We check all contacts until the date of positive test\n \"\"\"\n appContactQuery = (\n \"MATCH (pp:Person)-[r1:APP_CONTACT]->(ip:Person) \"\n \"WHERE ID(pp) = $id AND (r1.date > date($date) OR (r1.date = date($date) AND r1.hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.hour <= time($hour))) \"\n \"AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date;\"\n )\n locationContactQuery = (\n \"MATCH (pp:Person)-[r1:VISIT]->(l:Location)<-[r2:VISIT]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND (r1.date > date($date) OR (r1.date = date($date) AND r1.start_hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.end_hour <= time($hour))) AND r2.date = r1.date AND \"\n \"((r1.start_hour < r2.start_hour AND r1.end_hour > r2.start_hour) OR \"\n \"(r2.start_hour < r1.start_hour AND r2.end_hour > r1.start_hour)) AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{name: l.name , date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date , l.name;\"\n )\n\n # date = datetime.date.today() - datetime.timedelta(daysBack)\n \"\"\"\n date is referred to date test - daysback \n \"\"\"\n date = test_date - datetime.timedelta(daysBack)\n infectedIds = []\n with driver.session() as s:\n familyInfected = s.read_transaction(findInfectInFamily, familyQuery, id)\n appInfected = s.read_transaction(findInfect, appContactQuery, id, date, test_hour)\n locationInfected = s.read_transaction(findInfect, locationContactQuery, id, date, test_hour)\n\n for el in familyInfected, appInfected, locationInfected:\n if len(el) > 0:\n # Take just the id\n infectedIds.append(el[0]['ID(ip)'])\n\n infectedIds = []\n for el in familyInfected:\n infectedIds.append(el['ID(ip)'])\n\n for infectedId in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date:date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectFamily, query, id, infectedId, date.strftime(\"%Y-%m-%d\"))\n\n infectedIds = []\n for el in appInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n infectedIds.append(details)\n\n for infectedId, infectedDate in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE 
(pp)-[:COVID_EXPOSURE{date: date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectApp, query, id, infectedId, infectedDate)\n\n infectedIds = []\n\n for el in locationInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n details.append(el['l.name'])\n infectedIds.append(details)\n\n for infectedId, infectedDate, infectedPlace in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date) , name: $name}]->(ip);\"\n )\n s.write_transaction(createInfectLocation, query, id, infectedId, infectedDate, infectedPlace)", "def createRelationshipsGetVaccine(d, pIds, vIds):\n # Choose how many new visit relationships\n numberOfVaccines = MAX_NUMBER_OF_VACCINE\n\n for _ in range(0, numberOfVaccines):\n vIndex = randint(0, len(vIds) - 1)\n vaccineId = vIds[vIndex]\n pIndex = randint(0, len(pIds) - 1)\n personId = pIds[pIndex]\n date = datetime.date.today() - datetime.timedelta(days=randint(0, VACCINES_DAYS_BACKS))\n country = \"Italy\"\n # For the future: maybe do a random country\n # Ask to neo4j server how many vaccines the user did\n query = (\n \"MATCH (p:Person)-[r]->(v:Vaccine) \"\n \"WHERE ID(p) = $personId AND type(r)='GET_VACCINE'\"\n \"RETURN count(p) as count,ID(v) as vaccineID,r.expirationDate as date\"\n )\n with d.session() as s:\n datas = s.read_transaction(gettingNumberVaccines, query, personId)\n\n # if no vaccines do one, else make the second vaccine\n if len(datas) == 0:\n string2 = str(date + datetime.timedelta(days=28)).split(\"-\")\n expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))\n\n else:\n if len(datas) == 1:\n string1 = str(datas[0][\"date\"]).split(\"-\")\n date = datetime.date(int(string1[0]), int(string1[1]), int(string1[2]))\n string2 = str(date + datetime.timedelta(days=365)).split(\"-\")\n expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))\n\n vaccineId = datas[0][\"vaccineID\"]\n else:\n continue\n date = date.strftime(\"%Y-%m-%d\")\n expDate = expDate.strftime(\"%Y-%m-%d\")\n\n query = (\n \"MATCH (p:Person) , (v:Vaccine) \"\n \"WHERE ID(p) = $personId AND ID(v) = $vaccineId \"\n \"MERGE (p)-[:GET_VACCINE{date:date($date),country:$country,expirationDate:date($expDate)}]->(v); \"\n )\n\n # Execute the query\n with d.session() as s:\n s.write_transaction(createGettingVaccine, query, personId, vaccineId, date, country, expDate)", "def _add_relationships(self, element: Element) -> None:\n elements: Set[str] = {v.id for v in self.element_views}\n\n for relationship in element.get_efferent_relationships():\n if relationship.destination.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )\n\n for relationship in element.get_afferent_relationships():\n if relationship.source.id in elements:\n self._relationship_views.add(\n RelationshipView(relationship=relationship)\n )", "def populate_graph(self):", "def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n 
#rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes", "def create_graph(self, lat, lon):\n # Open connection to the database (nodes)\n cur = armaps.model.get_db()\n\n # Get the waypoints\n cur.execute(\n \"SELECT * FROM waypoints WHERE venue_id = %s\", \n (self.venue_id,)\n )\n waypoints = cur.fetchall()\n\n # Get the paths (edges)\n cur.execute(\n \"SELECT * FROM paths WHERE venue_id = %s\",\n (self.venue_id,)\n )\n paths = cur.fetchall()\n\n # Transform list of waypoints into dictionary with key = waypoint_id\n for waypoint in waypoints:\n self.waypoints[int(waypoint[\"waypoint_id\"])] = {\n \"lat\": float(waypoint[\"latitude\"]),\n \"lon\": float(waypoint[\"longitude\"]),\n \"waypoint_id\": int(waypoint[\"waypoint_id\"])\n }\n\n # Calculate weights of edges in graph\n for path in paths:\n # Get two nodes (waypoints) associated with edge\n inNode = int(path[\"innode\"])\n outNode = int(path[\"outnode\"])\n\n # Get the coordinates of nodes\n inNode_coords = (self.waypoints[inNode][\"lat\"], self.waypoints[inNode][\"lon\"])\n outNode_coords = (self.waypoints[outNode][\"lat\"], self.waypoints[outNode][\"lon\"])\n distance = geopy.distance.distance(inNode_coords, outNode_coords).miles\n\n # Add to graph (both ways for undirected)\n self.graph.add_edge(inNode, outNode, distance)\n self.graph.add_edge(outNode, inNode, distance)", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode 
{epi}\"})", "def _add_dominance_relation__to__nodes(self):\n dominating_dict = defaultdict(list)\n dominated_dict = defaultdict(list)\n for dom_rel_id in self._dominance_relation_ids:\n dominated_node_id = self.edges[dom_rel_id].target\n dominating_node_id = self.edges[dom_rel_id].source\n dominating_dict[dominating_node_id].append(dominated_node_id)\n dominated_dict[dominated_node_id].append(dominating_node_id)\n\n for dominating_node_id in dominating_dict:\n self.nodes[dominating_node_id].dominates = \\\n dominating_dict[dominating_node_id]\n for dominated_node_id in dominated_dict:\n self.nodes[dominated_node_id].dominated_by = \\\n dominated_dict[dominated_node_id]", "def gen_graph(self):", "def _generate_ribs(self):\n for fw in self._fw_rules:\n source_tag = fw['source_tag']\n dest_tag = fw['dest_tag']\n\n for source_vm_index in self._tag_owners[source_tag]:\n for dest_vm_index in self._tag_owners[dest_tag]:\n # Add to each vertex access ability nodes\n self._graph[source_vm_index].add(dest_vm_index)", "def create_relationships(self, source_nodes, target_nodes):\n source_nodes = (source_nodes,) if isinstance(source_nodes, Node) else source_nodes\n target_nodes = (target_nodes,) if isinstance(target_nodes, Node) else target_nodes\n for source_node in source_nodes:\n for target_node in target_nodes:\n self.create_relationship(source_node, target_node)", "def createRelationshipsMakeTest(d, pIds, tIds):\n # Choose how many new visit relationships\n numberOfTest = MAX_NUMBER_OF_TEST\n\n for _ in range(0, numberOfTest):\n probability = random()\n tIndex = randint(0, len(tIds) - 1)\n testId = tIds[tIndex]\n pIndex = randint(0, len(pIds) - 1)\n personId = pIds[pIndex]\n date = datetime.date.today() - datetime.timedelta(days=randint(0, TESTS_DAYS_BACKS))\n h = randint(0, 23)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n string_date = date.strftime(\"%Y-%m-%d\")\n hour = str(h) + \":\" + str(minutes)\n\n if probability < PROBABILITY_TO_BE_POSITIVE:\n result = \"Positive\"\n else:\n result = \"Negative\"\n\n query = (\n \"MATCH (p:Person) , (t:Test) \"\n \"WHERE ID(p) = $personId AND ID(t) = $testId \"\n \"MERGE (p)-[:MAKE_TEST{date:date($date) , hour: time($hour) ,result:$result}]->(t); \"\n )\n\n # If negative, all infections have to be neglected\n if probability >= PROBABILITY_TO_BE_POSITIVE:\n # Check whether or not I have been infected by someone\n delete_possible_infection_command = (\n \"MATCH ()-[i:COVID_EXPOSURE]->(p:Person)\"\n \"WHERE ID(p) = $personId AND (date($date) >= i.date + duration({days: 7})) \"\n \"DELETE i\"\n )\n with d.session() as s:\n s.write_transaction(delete_possible_infection, delete_possible_infection_command,\n personId, string_date, hour)\n # Execute the query\n with d.session() as s:\n s.write_transaction(createMakingTest, query, personId, testId, string_date, hour, result)", "def createGraph(self):\n \n for episode in self.episodes:\n listeSuccessors = [episode[episode[:,1] > episode[i,1]][:,0] # List of list of successors for each user\n for i in range(len(episode))] \n for i, successeur in enumerate(listeSuccessors): # for the list of successors of each user\n for v in successeur: # for every successor of a user\n u, proba = episode[i,0], np.random.random() # Generate a probability so within (0,1)\n self.successors[u][v] = proba # u ---(proba)---> v \n self.predecessors[v][u] = proba # v ---(proba)---> u", "def meshRelationships(Objects):\r\n # Create some variables to be used to store objects\r\n foreheadVariable = []\r\n 
noseBridgeVariable = []\r\n noseVariable = []\r\n eyeVariable = []\r\n mouthLoopVariable = []\r\n mouthVariable = []\r\n cheekVariable = []\r\n chinVariable = []\r\n earVariable = []\r\n backHeadVariable = []\r\n lowerBackHeadVariable = []\r\n\r\n # Create the relationshipList\r\n relationshipList = []\r\n\r\n for forehead in Objects:\r\n if \"TubxForehead_geo_\" in forehead:\r\n foreheadVariable.append(forehead)\r\n\r\n for noseBridge in Objects:\r\n if \"TubxNoseBridge_geo_\" in noseBridge:\r\n noseBridgeVariable.append(noseBridge)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, noseBridge, forehead)\r\n\r\n for eye in Objects:\r\n if \"TubxEye_geo_\" in eye:\r\n eyeVariable.append(eye)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, eye, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, eye, noseBridge)\r\n\r\n for nose in Objects:\r\n if \"TubxNose_geo_\" in nose:\r\n noseVariable.append(nose)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, nose, noseBridge)\r\n\r\n for mouthLoop in Objects:\r\n if \"TubxMouthLoop_geo_\" in mouthLoop:\r\n mouthLoopVariable.append(mouthLoop)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList, mouthLoop, nose)\r\n\r\n for mouth in Objects:\r\n if \"TubxMouth_geo_\" in mouth:\r\n mouthVariable.append(mouth)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, mouth, mouthLoop)\r\n\r\n for cheek in Objects:\r\n if \"TubxCheek_geo_\" in cheek:\r\n cheekVariable.append(cheek)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, cheek, mouthLoop)\r\n\r\n for chin in Objects:\r\n if \"TubxChin_geo_\" in chin:\r\n chinVariable.append(chin)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, chin, mouthLoop)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, chin, cheek)\r\n\r\n for ear in Objects:\r\n if \"TubxEar_geo_\" in ear:\r\n earVariable.append(ear)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, ear, forehead)\r\n for cheek in cheekVariable:\r\n createRelationships(relationshipList, ear, cheek)\r\n\r\n for backhead in Objects:\r\n if \"TubxBackHead_geo_\" in backhead:\r\n backHeadVariable.append(backhead)\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, backhead, forehead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, backhead, ear)\r\n\r\n for lowerbackhead in Objects:\r\n if \"TubxLowerBackHead_geo_\" in lowerbackhead:\r\n lowerBackHeadVariable.append(lowerbackhead)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, lowerbackhead, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, lowerbackhead, backhead)\r\n\r\n for default in Objects:\r\n for forehead in foreheadVariable:\r\n createRelationships(relationshipList, default, forehead)\r\n for noseBridge in noseBridgeVariable:\r\n createRelationships(relationshipList, default, noseBridge)\r\n for nose in noseVariable:\r\n createRelationships(relationshipList,default,nose)\r\n for eye in eyeVariable:\r\n createRelationships(relationshipList, default, eye)\r\n for mouthLoop in mouthLoopVariable:\r\n createRelationships(relationshipList, default, mouthLoop)\r\n for mouth in mouthVariable:\r\n createRelationships(relationshipList, default, mouth)\r\n for cheek in cheekVariable:\r\n 
createRelationships(relationshipList, default, cheek)\r\n for chin in chinVariable:\r\n createRelationships(relationshipList, default, chin)\r\n for ear in earVariable:\r\n createRelationships(relationshipList, default, ear)\r\n for backhead in backHeadVariable:\r\n createRelationships(relationshipList, default, backhead)\r\n for lowerbackhead in lowerBackHeadVariable:\r\n createRelationships(relationshipList, default, lowerbackhead)\r\n\r\n return relationshipList", "def create_social_graph(file):\n social_graph = NonDirectionalGraph(\"SocialGraph\")\n with open(file, \"rt\") as f:\n data = f.readlines()\n n_friendship = 0 # Represents the number of friendships in the graph in each iteration\n highest_n_friendship = 0 # Captures the highest record of n_friendship in the graph\n highest_n_neighbors_per_node_dict = {} # Captures the highest record of friendship per node\n for line in data:\n split_line = line.split()\n if \"became\" in split_line: # \"became\" is in lines where persons become connected\n for name in [split_line[0], split_line[2]]:\n # The following if statement makes sure to instantiate the node and adds it to the graph\n if name not in social_graph:\n node = Node(name)\n social_graph.add_node(node)\n highest_n_neighbors_per_node_dict[name] = 0 ##\n social_graph.add_edge(split_line[0],split_line[2]) # Adds a connection between the nodes\n n_friendship += 1 # Updates the number of friendships\n # The following for loop updates the highest number of friends (neighbors) if it changes\n for name in [split_line[0], split_line[2]]:\n if len(social_graph.nodes[name].neighbors) > highest_n_neighbors_per_node_dict[name]:\n highest_n_neighbors_per_node_dict[name] = len(social_graph.nodes[name].neighbors)\n elif \"cancelled\" in split_line: # \"became\" is in lines where persons become disconnected\n social_graph.remove_edge(split_line[0], split_line[2])\n n_friendship -= 1 # Updates the number of friendships\n # In case any of the words \"cancelled\" or \"became\" is in the line\n else:\n print(\"Unrecognized line\")\n # The following for loop updates the highest number of friendship if it changes\n if n_friendship > highest_n_friendship:\n highest_n_friendship = n_friendship\n return social_graph, highest_n_friendship, highest_n_neighbors_per_node_dict", "def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def contribs2edges():\r\n client = 
mongo.MongoClient(config[\"MONGO_URI\"])\r\n db = client.links\r\n db.edges.remove()\r\n edges = dict()\r\n for contrib in db.contribs.find():\r\n for item in contrib[\"data\"]:\r\n id = u\"{} {}\".format(item[\"name_1\"], item[\"name_2\"]).replace(\" \", \"_\")\r\n edge = edges.get(id) #db.edges.find_one({\"_id\" : id}))\r\n if not edge:\r\n edge = {\"_id\" : id, \"name_1\" : item[\"name_1\"], \"name_2\" : item[\"name_2\"], \"tags\" : []}\r\n for tag in item[\"tags\"]:\r\n edge_tag = filter(lambda x: x[\"name\"] == tag, edge[\"tags\"])\r\n if len(edge_tag):\r\n edge_tag = edge_tag[0]\r\n else:\r\n edge_tag = {\"name\" : tag, \"urls\" : []}\r\n edge[\"tags\"].append(edge_tag)\r\n if item[\"url\"] not in edge_tag[\"urls\"]:\r\n edge_tag[\"urls\"].append(item[\"url\"])\r\n if id not in edges:\r\n edges[id] = edge\r\n db.edges.insert(edges.values())", "def new_friends(self, G):\r\n H = G.to_undirected() #creates an undirected copy of the original graph\r\n n = nx.preferential_attachment(H) #uses the preferential_attachment method from networkx to create friends\r\n for u, v, p in n:\r\n chance = random.randint(0, 100) #chance is a randomly generated number between 0 and 100\r\n if p >= len(G.edges) and chance >= 90: #creates a new relationship (edge) between two nodes if their preferential\r\n G.add_edge(u, v, weight=random.uniform(-1, 1)) #attachment number is higher than the total number of edges and\r\n else: #chance is greater than 90.\r\n continue\r\n return G", "def _makeEdges(self):\n self.edges = set()\n\n for i in range(self.size):\n self.edges.add(makePair(self.tour[i - 1], self.tour[i]))", "def makeGraphDictionary(self):\n graph_dict_incomplete = {}\n # dictionary contains all links, no matter if they are functional\n for i in range(0, len(self._partner_indices)):\n graph_dict_incomplete[i] = set(self._partner_indices[i])\n if self._variant[0] == \"V0_instant\":\n self.graph_dict = graph_dict_incomplete\n else:\n # helper\n link_list = []\n link_list2 = []\n for vertex in graph_dict_incomplete:\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=set())\n for neighbour in graph_dict_incomplete[vertex]:\n # Iterate through all plants and the neighbours\n # If a new pair occurs it will be appended in link_list2\n # If the pair occurs again it wll be appended in link_list\n # This means that the link (or rgf process) is finished\n # for both plants\n if {neighbour, vertex} not in link_list2:\n link_list2.append({vertex, neighbour})\n else:\n # plants are only put in the dict. if they occur more\n # than once, i.e. 
both partners have finished rgf\n link_list.append({vertex, neighbour})\n self.setKeyDictionary(dictionary=self.graph_dict,\n key=vertex,\n value=neighbour)", "def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c", "def createRelation(rid, rlabel, list, x, y):\n relation = Relation(rid, rlabel, x, y)\n list.append(relation)", "def make_nodes_and_paths(friends_lst):\n\n # nodes = {}\n\n # for item in friends_lst:\n # friend1, friend2, group = item\n # for person in pair:\n # if not nodes.get(person):\n # nodes[person] = pair[1]\n\n # nodes = [{'name': person, 'friend': nodes[person]} for person in nodes.keys()]\n\n nodes = {}\n for item in friends_lst:\n friend1, friend2, group = item\n if not nodes.get(friend1):\n nodes[friend1] = group\n elif nodes.get(friend1) > group:\n nodes[friend1] = group\n\n nodes = [{'name': person, 'group': nodes[person]} for person in nodes.keys()]\n\n index_nodes = {}\n for idx, n in enumerate(nodes):\n index_nodes[n['name']] = (idx, n['group'])\n\n paths = []\n\n # paths.append({'source': item[1], 'target': item[0]})\n\n for item in friends_lst:\n # one = User.query.get(item.user_id)\n # two = User.query.get(item.friend_id)\n source, target, group = item\n paths.append({'source': index_nodes[source][0], 'target': index_nodes[target][0]})\n\n # print nodes\n # print index_nodes\n # print paths\n\n return nodes, paths", "def make_graph_public(self, name):\n\n\t\treturn self.update_graph(name, is_public=1)", "def relations_from(self, start_node):" ]
[ "0.660073", "0.5670493", "0.5551359", "0.5534307", "0.55109274", "0.5472602", "0.5344431", "0.5332479", "0.5324703", "0.5321007", "0.5259523", "0.5253097", "0.52161443", "0.52000916", "0.5184464", "0.5157209", "0.513095", "0.51219463", "0.511944", "0.5115102", "0.5072005", "0.50661796", "0.50488216", "0.50411147", "0.5040726", "0.5023198", "0.5001443", "0.4999094", "0.4993958", "0.49860868" ]
0.6244256
1
Method that creates GET_VACCINE relationships
def createRelationshipsGetVaccine(d, pIds, vIds): # Choose how many new visit relationships numberOfVaccines = MAX_NUMBER_OF_VACCINE for _ in range(0, numberOfVaccines): vIndex = randint(0, len(vIds) - 1) vaccineId = vIds[vIndex] pIndex = randint(0, len(pIds) - 1) personId = pIds[pIndex] date = datetime.date.today() - datetime.timedelta(days=randint(0, VACCINES_DAYS_BACKS)) country = "Italy" # For the future: maybe do a random country # Ask to neo4j server how many vaccines the user did query = ( "MATCH (p:Person)-[r]->(v:Vaccine) " "WHERE ID(p) = $personId AND type(r)='GET_VACCINE'" "RETURN count(p) as count,ID(v) as vaccineID,r.expirationDate as date" ) with d.session() as s: datas = s.read_transaction(gettingNumberVaccines, query, personId) # if no vaccines do one, else make the second vaccine if len(datas) == 0: string2 = str(date + datetime.timedelta(days=28)).split("-") expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2])) else: if len(datas) == 1: string1 = str(datas[0]["date"]).split("-") date = datetime.date(int(string1[0]), int(string1[1]), int(string1[2])) string2 = str(date + datetime.timedelta(days=365)).split("-") expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2])) vaccineId = datas[0]["vaccineID"] else: continue date = date.strftime("%Y-%m-%d") expDate = expDate.strftime("%Y-%m-%d") query = ( "MATCH (p:Person) , (v:Vaccine) " "WHERE ID(p) = $personId AND ID(v) = $vaccineId " "MERGE (p)-[:GET_VACCINE{date:date($date),country:$country,expirationDate:date($expDate)}]->(v); " ) # Execute the query with d.session() as s: s.write_transaction(createGettingVaccine, query, personId, vaccineId, date, country, expDate)
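For orientation: createRelationshipsGetVaccine above only assembles the parameterized Cypher MERGE and hands the actual write to a small transaction helper, createGettingVaccine, which appears verbatim among the negatives below. The following is a minimal, self-contained sketch of that same pattern with the official neo4j Python driver; the bolt URI, credentials and the example ids/dates are assumed placeholders, not values taken from the record.

from neo4j import GraphDatabase

def createGettingVaccine(tx, query, personId, vaccineId, date, country, expDate):
    # thin wrapper: one parameterized Cypher statement per write transaction
    tx.run(query, personId=personId, vaccineId=vaccineId, date=date,
           country=country, expDate=expDate)

get_vaccine_query = (
    "MATCH (p:Person) , (v:Vaccine) "
    "WHERE ID(p) = $personId AND ID(v) = $vaccineId "
    "MERGE (p)-[:GET_VACCINE{date:date($date),country:$country,expirationDate:date($expDate)}]->(v); "
)

# placeholder connection details (assumption); the record itself receives an already-built driver `d`
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
with driver.session() as s:
    s.write_transaction(createGettingVaccine, get_vaccine_query,
                        0, 1, "2021-05-01", "Italy", "2021-05-29")
driver.close()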
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def createGettingVaccine(tx, query, personId, vaccineId, date, country, expDate):\n tx.run(query, personId=personId, vaccineId=vaccineId, date=date, country=country, expDate=expDate)", "def findAllVaccine(tx):\n query = (\n \"MATCH (v:Vaccine) \"\n \"RETURN v , ID(v);\"\n )\n results = tx.run(query).data()\n return results", "def createNodeVaccines(vaccinesList):\n vaccinesQuery = []\n for vaccineEl in vaccinesList:\n currentQuery = (\n \"CREATE (v:Vaccine {name: \\\"\" + str(vaccineEl[int(VaccineAttribute.NAME)]) + \"\\\" , producer: \\\"\" +\n str(vaccineEl[int(VaccineAttribute.PRODUCER)]) + \"\\\"}); \"\n )\n vaccinesQuery.append(currentQuery)\n return vaccinesQuery", "def relationships(self):", "def link_residues(self) -> None:\n ...", "def get(self, request, *args, **kwargs):\n region_id = request.GET.get('id', None)\n region = get_object_or_404(Region, id=region_id)\n\n provinces = Province.objects.filter(region=region)\n\n serializer = ProvinceSerializer(provinces, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def cinema_in_location(request, suburb):\n if request.method == 'GET':\n #The following is a JOIN query on the Cinema and Address tables\n cinemas = Cinema.objects.filter(address__suburb__iexact=suburb)\n serializer = MovieSerializer(cinemas, many=True)\n return JSONResponse(serializer.data)\n else:\n return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)", "def create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def get(self, request, format=None):\n rdvs = RendezVous.objects.all()\n serializer = RendezvousSerializer(rdvs, many=True)\n return Response(serializer.data)", "def getVaccinesId(tx):\n query = (\n \"MATCH (v:Vaccine)\"\n \"RETURN ID(v)\"\n )\n\n idsList = tx.run(query).data()\n return idsList", "def get_tenants(self):", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get(self, request, *args, **kwargs):\n province_id = request.GET.get('id', None)\n province = get_object_or_404(Province, id=province_id)\n\n cities = City.objects.filter(province=province)\n\n serializer = CitySerializer(cities, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get(self, request, *args, **kwargs):\n address = ProfileAddress.objects.filter(profile__user__is_verified=True)\n address = address.exclude(city__isnull=True)\n coordinates = [c.city.get_coords() for c in address]\n\n serializer = CityCoordinateSerializer(coordinates, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get(self, request, *args, **kwargs):\n urls = Link.objects.filter(user=request.user.id)\n 
serializer = UrlSerializer(urls, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_curso_prerequisitos(request):\n if request.method == 'GET':\n serializer = CursoSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n if \"curso\" in serializer.validated_data:\n try:\n curso = Curso.nodes.get(nombre__icontains=serializer.validated_data[\"curso\"])\n resp = {\"nombre_curso\": curso.__dict__[\"nombre\"],\n \"pre_requisitos\": curso.__dict__[\"pre_requisitos\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", serializer.validated_data[\"curso\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"curso\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n if \"codigo\" in serializer.validated_data:\n try:\n curso = Curso.nodes.get(cod__icontains=serializer.validated_data[\"codigo\"])\n resp = {\"nombre_curso\": curso.__dict__[\"nombre\"],\n \"pre_requisitos\": curso.__dict__[\"pre_requisitos\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", serializer.validated_data[\"codigo\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"codigo\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n if \"synonym\" in serializer.validated_data:\n try:\n sinonimo = Sinonimo.nodes.get(sinonimo__icontains=serializer.validated_data[\"synonym\"])\n print(sinonimo.__dict__[\"curso\"][\"nombre\"])\n resp = {\"nombre_curso\": \"WIP\"}\n # resp = {\"nombre_curso\": curso.__dict__[\"nombre\"], \"fecha_inicio\": curso.__dict__[\"fecha_inicio\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", serializer.validated_data[\"synonym\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"synonym\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n return JsonResponse(serializer.errors, status=status.HTTP_404_NOT_FOUND)", "def get(self,request):\n\n tours = Tour.objects.all()\n serializer = TourSerializer(tours,many=True)\n return Response(serializer.data)", "def get(self, request):\n user = request.user\n properties = []\n properties_of_interest = user.property_of_interest.all()\n for property_of_interest in properties_of_interest:\n properties.append(property_of_interest.listed_property)\n serializer = PropertySerializer(properties, many=True)\n return Response(serializer.data)", "def assoc_list():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n reqdata = request.json\n if not check_token(reqdata[\"token\"]):\n return jsonify(status=TOKEN_ERROR)\n users_paths = db.session.query(UserPathAssociation).all()\n resdata = []\n for e in users_paths:\n resdata.append({\"id\" : e.id, \"user_id\":e.user_id, \"path_id\":e.path_id})\n return jsonify(data=resdata, status=OK_STATUS)", "def createRelationshipsInfect(id, test_date, test_hour, daysBack):\n familyQuery = (\n \"MATCH (pp:Person)-[:LIVE]->(h:House)<-[:LIVE]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND NOT (ip)<-[:COVID_EXPOSURE]-(pp)\"\n \"RETURN DISTINCT ID(ip);\"\n )\n\n \"\"\"\n IMPORTANT: ($date) represents the date from which we check the contacts. 
It is the date of positive test - 7 days\n We check all contacts until the date of positive test\n \"\"\"\n appContactQuery = (\n \"MATCH (pp:Person)-[r1:APP_CONTACT]->(ip:Person) \"\n \"WHERE ID(pp) = $id AND (r1.date > date($date) OR (r1.date = date($date) AND r1.hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.hour <= time($hour))) \"\n \"AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date;\"\n )\n locationContactQuery = (\n \"MATCH (pp:Person)-[r1:VISIT]->(l:Location)<-[r2:VISIT]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND (r1.date > date($date) OR (r1.date = date($date) AND r1.start_hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.end_hour <= time($hour))) AND r2.date = r1.date AND \"\n \"((r1.start_hour < r2.start_hour AND r1.end_hour > r2.start_hour) OR \"\n \"(r2.start_hour < r1.start_hour AND r2.end_hour > r1.start_hour)) AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{name: l.name , date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date , l.name;\"\n )\n\n # date = datetime.date.today() - datetime.timedelta(daysBack)\n \"\"\"\n date is referred to date test - daysback \n \"\"\"\n date = test_date - datetime.timedelta(daysBack)\n infectedIds = []\n with driver.session() as s:\n familyInfected = s.read_transaction(findInfectInFamily, familyQuery, id)\n appInfected = s.read_transaction(findInfect, appContactQuery, id, date, test_hour)\n locationInfected = s.read_transaction(findInfect, locationContactQuery, id, date, test_hour)\n\n for el in familyInfected, appInfected, locationInfected:\n if len(el) > 0:\n # Take just the id\n infectedIds.append(el[0]['ID(ip)'])\n\n infectedIds = []\n for el in familyInfected:\n infectedIds.append(el['ID(ip)'])\n\n for infectedId in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date:date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectFamily, query, id, infectedId, date.strftime(\"%Y-%m-%d\"))\n\n infectedIds = []\n for el in appInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n infectedIds.append(details)\n\n for infectedId, infectedDate in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectApp, query, id, infectedId, infectedDate)\n\n infectedIds = []\n\n for el in locationInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n details.append(el['l.name'])\n infectedIds.append(details)\n\n for infectedId, infectedDate, infectedPlace in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date) , name: $name}]->(ip);\"\n )\n s.write_transaction(createInfectLocation, query, id, infectedId, infectedDate, infectedPlace)", "def vaccinations(self, from_date: str, to_date: str) -> VaccinationList:\n params = {'date_from': from_date, 'date_to': to_date}\n data = self.get(\"mdg_emvolio\", params=params)\n\n ls = [Vaccination(**area) for area in data]\n return VaccinationList(items=ls)", "def test_relationship_edges(self):\n path = os.path.join(get_file_dir(), 'data', 'GO_edges_relationship.json')\n with 
open(path, 'rt') as json_file:\n json_files = []\n for data in json_file:\n json_files.append(json.loads(data))\n for entry in json_files:\n if entry[\"id\"] == \"GO:0000332__GO:0003720__part_of\":\n self.assertEqual(entry[\"from\"], \"GO_term/GO:0000332\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0003720\")\n self.assertEqual(entry[\"relationship_type\"], \"part_of\")\n if entry[\"from\"] == \"GO_term/GO:0000335\":\n self.assertEqual(entry[\"id\"], \"GO:0000335__GO:0006313__negatively_regulates\")\n self.assertEqual(entry[\"to\"], \"GO_term/GO:0006313\")\n self.assertEqual(entry[\"relationship_type\"], \"negatively_regulates\")", "def get_cursos(request):\n if request.method == 'GET':\n cursos = Curso.nodes.all()\n cursos_list = []\n for i in range(0, len(cursos)):\n cursos_list.append(cursos[i].__dict__[\"nombre\"])\n return JsonResponse({\"cursos\": cursos_list})", "def get(self, request, *args, **kwargs):\n country_id = request.GET.get('id', None)\n country = get_object_or_404(Country, id=country_id)\n\n regions = Region.objects.filter(country=country)\n\n serializer = RegionSerializer(regions, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def cinema_list(request):\n if request.method == 'GET':\n cinemas = Cinema.objects.all()\n serializer = CinemaSerializer(cinemas, many=True)\n return JSONResponse(serializer.data)\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = CinemaSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return JSONResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)", "def antweb_links(request, format='csv'):\n\n\n\ttaxonomy = []\n\tif request.GET.get('taxon_code'):\n\t\ttaxonomy = Taxonomy.objects.raw(\"\"\"\n\t\tSELECT taxon_code, subfamily_name, genus_name, species_name\n\t\tFROM map_taxonomy_list\n\t\tWHERE taxon_code = %s\n\t\t\"\"\", [request.GET.get('taxon_code')])\n\t\t\n\t\t# serialize to JSON\n\t\tjson_objects = [{'key': t.taxon_code, 'speciesName': t.species_name, 'genusName': t.genus_name, 'subfamilyName': t.subfamily_name} for t in taxonomy]\n\t\t\n\t\treturn JSONResponse({'taxonomy': json_objects})\n\t\t\n\telif request.GET.get('genus_name'):\n\t\ttaxonomy = Taxonomy.objects.raw(\"\"\"\n\t\tSELECT genus_name, subfamily_name,taxon_code\n\t\tFROM map_taxonomy_list\n\t\tWHERE genus_name = %s\n\t\tGROUP BY genus_name, subfamily_name,taxon_code\n\t\t\"\"\", [request.GET.get('genus_name')])\n\t\t\n\t\t# serialize to JSON\n\t\tjson_objects = [{'key': t.genus_name, 'subfamilyName': t.subfamily_name} for t in taxonomy]\n\t\t\n\t\treturn JSONResponse({'taxonomy': json_objects})\n\t\n\telse:\n\t\treturn JSONResponse({'taxonomy': []})", "def get_curso_inscripcion(request):\n if request.method == 'GET':\n serializer = CursoSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n if \"curso\" in serializer.validated_data:\n try:\n curso = Curso.nodes.get(nombre__icontains=serializer.validated_data[\"curso\"])\n resp = {\"nombre_curso\": curso.__dict__[\"nombre\"], \"link\": curso.__dict__[\"link\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", serializer.validated_data[\"curso\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"curso\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n 
if \"codigo\" in serializer.validated_data:\n try:\n curso = Curso.nodes.get(cod__icontains=serializer.validated_data[\"codigo\"])\n resp = {\"nombre_curso\": curso.__dict__[\"nombre\"], \"link\": curso.__dict__[\"link\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", serializer.validated_data[\"codigo\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"codigo\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n return JsonResponse(serializer.errors, status=status.HTTP_404_NOT_FOUND)", "def test_getCpfRelations(self):\n pass" ]
[ "0.7040181", "0.6066519", "0.5884874", "0.5713347", "0.56355274", "0.52946997", "0.5129973", "0.5061427", "0.5022418", "0.49662134", "0.49501115", "0.4892924", "0.48890257", "0.48078194", "0.48069763", "0.47821605", "0.47716796", "0.47676593", "0.47588113", "0.4755011", "0.474546", "0.47346005", "0.47262296", "0.47194672", "0.47118244", "0.47049588", "0.4701229", "0.46977904", "0.46844617", "0.46739492" ]
0.7255064
0
Method that creates MAKE_TEST relationships
def createRelationshipsMakeTest(d, pIds, tIds): # Choose how many new visit relationships numberOfTest = MAX_NUMBER_OF_TEST for _ in range(0, numberOfTest): probability = random() tIndex = randint(0, len(tIds) - 1) testId = tIds[tIndex] pIndex = randint(0, len(pIds) - 1) personId = pIds[pIndex] date = datetime.date.today() - datetime.timedelta(days=randint(0, TESTS_DAYS_BACKS)) h = randint(0, 23) minutes = randint(0, 59) if minutes < 10: minutes = "0" + str(minutes) string_date = date.strftime("%Y-%m-%d") hour = str(h) + ":" + str(minutes) if probability < PROBABILITY_TO_BE_POSITIVE: result = "Positive" else: result = "Negative" query = ( "MATCH (p:Person) , (t:Test) " "WHERE ID(p) = $personId AND ID(t) = $testId " "MERGE (p)-[:MAKE_TEST{date:date($date) , hour: time($hour) ,result:$result}]->(t); " ) # If negative, all infections have to be neglected if probability >= PROBABILITY_TO_BE_POSITIVE: # Check whether or not I have been infected by someone delete_possible_infection_command = ( "MATCH ()-[i:COVID_EXPOSURE]->(p:Person)" "WHERE ID(p) = $personId AND (date($date) >= i.date + duration({days: 7})) " "DELETE i" ) with d.session() as s: s.write_transaction(delete_possible_infection, delete_possible_infection_command, personId, string_date, hour) # Execute the query with d.session() as s: s.write_transaction(createMakingTest, query, personId, testId, string_date, hour, result)
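The document above delegates its writes to two transaction helpers, createMakingTest and delete_possible_infection, whose definitions are not part of this record. Judging from the sibling helpers visible in this dump (createVisit further down, createGettingVaccine among the previous record's negatives), they are most likely thin tx.run wrappers; the sketch below assumes that shape and is not the original source.

def createMakingTest(tx, query, personId, testId, date, hour, result):
    # assumed shape: forward the MERGE ... MAKE_TEST parameters straight to the Cypher query
    tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)

def delete_possible_infection(tx, query, personId, date, hour):
    # assumed shape: the DELETE query only references $personId and $date,
    # so the hour argument is passed through without being used by Cypher
    tx.run(query, personId=personId, date=date, hour=hour)

With these signatures, the write_transaction calls in the document would run unchanged.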
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findAllMakeTestRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:MAKE_TEST]->(n2:Test) \"\n \"RETURN ID(n1) , r , r.date , r.hour , r.result , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def test_add_relation_types(self):\n pass", "def test_add_relation_type(self):\n pass", "def test_create(self):\n pass", "def setUp(self):\n\n\n # InverseLabeling\n invLabeling0 = {'L0': [0, 1, 2]}\n\n invLabeling1 = {'L0' : [0, 2],\n 'L1' : [1]}\n\n invLabeling2 = {\n 'L0' : [0],\n 'L1' : [1],\n 'L2' : [2]\n }\n\n invLabeling3 = {\n 'L1' : [0, 1],\n 'L2' : [2]\n }\n\n invLabeling4 = {\n 'L0' : [0,1],\n 'L1' : [0],\n 'L2' : [2]\n }\n\n invLabeling5 = {\n 'L0': [0, 1, 2],\n 'L1': []\n }\n \n # Create some ontologies\n ontology0 = {'L0': ['L0']}\n\n ontology1 = {}\n\n ontology2 = {'L0': ['L1']}\n\n ontology3 = {'L0': ['L1', 'L2'],\n 'L1': ['L2'],\n 'L2': ['L0']}\n\n if self.id().split('.')[-1] == 'test_createLinkograph':\n self.testParams = [\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())] \n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {2}),\n ({'L1'}, set(), set()),\n ({'L0'}, {0}, set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L1'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling2,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1,2}),\n ({'L1'}, {0}, {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling3,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L1'}, set(), {2}),\n ({'L1'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling4,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0', 'L1'}, set(), {2}),\n ({'L0'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling5,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n ]", "def test_create_rule(self):\n pass", "def make_test_object(self):\n return self.orm_cls.testing_create()", "def test_get_relationship_templates(self):\n pass", "def test_createLinkograph(self):\n 
self.performTestForParams()", "def test_map_object(self):\n with factories.single_commit():\n program = factories.ProgramFactory()\n audit = factories.AuditFactory(program=program)\n factories.RelationshipFactory(\n source=audit,\n destination=program\n )\n product = factories.ProductFactory()\n factories.RelationshipFactory(\n source=program,\n destination=product\n )\n\n data = [{\n \"relationship\": {\n \"context\": None,\n \"destination\": {\n \"id\": product.id,\n \"type\": \"Product\",\n \"href\": \"/api/products/{}\".format(product.id)\n },\n \"source\": {\n \"id\": audit.id,\n \"type\": \"Audit\",\n \"href\": \"/api/audits/{}\".format(audit.id)\n }\n }\n }]\n\n response = self.api.client.post(\n \"/api/relationships\",\n data=json.dumps(data),\n headers=self.headers\n )\n self.assert200(response)", "def test_create_goal(self):\n pass", "def default_create_test_data(self, db_name):\n anchor1 = AppDeleteAnchor1.objects.using(db_name).create(value=100)\n anchor2 = AppDeleteAnchor1.objects.using(db_name).create(value=100)\n\n model = AppDeleteBaseModel.objects.using(db_name).create(\n char_field='test',\n int_field=1,\n anchor_fk=anchor1)\n model.m2m.add(anchor2)", "def test_getResourceRelations(self):\n pass", "def create_models( self ):", "def setUpTestData(cls):\n cls.test_resource = Resource(name='Test', slug='test', description='')\n cls.test_resource.full_clean()\n cls.test_resource.save()\n cls.test_faculty = Faculty(name='Test', slug='test')\n cls.test_faculty.full_clean()\n cls.test_faculty.save()\n cls.test_department = Department(name='Test', slug='test', faculty=cls.test_faculty)\n cls.test_department.full_clean()\n cls.test_department.save()\n cls.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=cls.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n cls.test_agreement.full_clean()\n cls.test_agreement.save()\n cls.test_user = get_user_model().objects.create_user(username='test',\n first_name='test',\n last_name='test',\n email='[email protected]',\n password='testtesttest')", "def setUp(self):\n self.title = \"test_a\"\n self.studio = Studio.objects.create(name=\"test_b\", city=\"test_b\")\n self.release_date = \"1st Feb 2015\"\n self.director = People.objects.create(name=\"test_b\", birth_date=\"00/00/2000\")\n self.actors = [People.objects.create(name=\"test_\", birth_date=\"00/00/2000\")]\n self.film = Film.objects.create(title=self.title, \n studio=self.studio,\n release_date=self.release_date,\n director = self.director,\n )", "def test_getCpfRelations(self):\n pass", "def setUp(self):\n self.test_resource = Resource(name='Test', slug='test', description='')\n self.test_resource.full_clean()\n self.test_resource.save()\n self.test_resource_two = Resource(name='Test Two', slug='testtwo', description='')\n self.test_resource_two.full_clean()\n self.test_resource_two.save()", "def test_create_record(self):\n pass", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n 
singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_merge_model_relationships(bf, dataset, organization, assert_in_neo4j):\n person = dataset.create_model(\n \"Person\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n\n food = dataset.create_model(\n \"Food\", schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)]\n )\n\n color = dataset.create_model(\n \"Color\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n\n # Relationship type with no \"from\" and \"to\"\n likes = 
dataset.create_relationship_type(\"Likes\", \"likes\")\n\n # Relationship type with \"from\" and \"to\", but no instances\n dataset.create_relationship_type(\n \"Appreciates\", \"appreciates\", source=person.id, destination=color.id\n )\n\n alice = person.create_record({\"name\": \"Alice\"})\n bob = person.create_record({\"name\": \"Bob\"})\n charlie = person.create_record({\"name\": \"Charlie\"})\n\n ice_cream = food.create_record({\"name\": \"Ice Cream\"})\n\n alice_likes_bob = likes.relate(alice, bob)\n bob_likes_charlie = likes.relate(bob, charlie)\n alice_likes_ice_cream = likes.relate(alice, ice_cream)\n\n # At this point we have in the relation_types file\n #\n # ()-[likes]->()\n # (person)-[appreciates]->(color)\n #\n # and in the schemaRelations file\n #\n # (person)-[likes]->(person)\n # (person)-[likes]->(food)\n #\n # The /relationships endpoint on the old service *only* returns things in\n # the relation_types file.\n #\n # But the new service should merge them both together to create all\n # necessary model relationships and stubs:\n #\n # ()-[likes]->()\n # (person)-[appreciates]->(color)\n # (person)-[likes]->(person)\n # (person)-[likes]->(food)\n\n migrate_dataset(\n organization_id=organization.int_id,\n # organization_node_id=organization.id,\n dataset_ids=[dataset.int_id]\n # dataset_node_id=dataset.id,\n )\n\n assert_in_neo4j()\n\n # Drop into raw requests because of\n # https://app.clickup.com/t/426zh9\n relationships = bf._api.concepts.relationships._get(\n bf._api.concepts.relationships._uri(\n \"/{dataset_id}/relationships\", dataset_id=dataset.id\n )\n )\n\n assert sorted(\n [(r[\"from\"] or \"*\", r[\"name\"], r[\"to\"] or \"*\") for r in relationships]\n ) == sorted(\n [\n (\"*\", \"Likes\", \"*\"),\n (person.id, \"Likes\", food.id),\n (person.id, \"Likes\", person.id),\n (person.id, \"Appreciates\", color.id),\n ]\n )", "def relationships(self):", "def setUp(self):\n self.test_faculty = Faculty(name='Test', slug='test')\n self.test_faculty.full_clean()\n self.test_faculty.save()\n self.test_department = Department(name='Test', slug='test', faculty=self.test_faculty)\n self.test_department.full_clean()\n self.test_department.save()\n self.test_resource = Resource(name='Test', slug='test', description='')\n self.test_resource.full_clean()\n self.test_resource.save()\n self.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=self.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n self.test_agreement.full_clean()\n self.test_agreement.save()", "def test_create10(self):\n pass", "def setUp(self):\n\n User.query.delete()\n Message.query.delete()\n Follows.query.delete()\n\n self.client = app.test_client()\n\n u = User(\n id = 1,\n email=\"[email protected]\",\n username=\"testuser\",\n password=\"HASHED_PASSWORD\"\n )\n\n u2 = User(\n id = 2,\n email=\"[email protected]\",\n username=\"testuser2\",\n password=\"HASHED_PASSWORD\"\n )\n\n db.session.add_all([u, u2])\n db.session.commit()\n\n m_u1 = Message(\n text = \"TestMessage1\",\n user_id = u.id\n ) \n\n m_u2 = Message(\n text = \"TestMessage1\",\n user_id = u2.id\n )\n\n db.session.add_all([m_u1, m_u2])\n db.session.commit()\n\n self.u = u\n self.u2 = u2\n self.m_u1 = m_u1\n self.m_u2 = m_u2", "def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n 
self.database_path = \"postgres://{}/{}\".format('localhost:5432', self.database_name)\n setup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n \n\n # Create Test Object\n \n self.new_Question = {\n 'question':'What is the tallest building',\n 'answer':'burjdubai',\n 'category':'4',\n 'difficulty':'2'\n }", "def setUpTestData(cls):\n\t\thierarchy = Hierarchy(name=\"TestHierarchy\", graph_representation=\"{}\")\n\t\thierarchy.save()\n\t\tevent_type = EventType(name=\"asd\", hierarchy=hierarchy)\n\t\tevent_type.save()\n\t\tquery = Query(\n\t\t\thierarchy=hierarchy, query_string=\"INSERT INTO asd SELECT * FROM asd\",\n\t\t\toutput_event_type=event_type,\n\t\t\teqmn_representation=\"{'output': {'name': 'asd', 'select': '*'}, 'input': {'single': 'asd'}}\")\n\t\tquery.save()\n\t\tquery.input_event_types.add(event_type)\n\t\tquery.save()", "def setUp(self):\n\n if self.id().split('.')[-1] == 'test_checkLinkoStructure':\n self.testParams = [\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, set(), {2}),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': False,\n 'expectedErrors':\n {1: ({0}, set())\n }\n },\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, set()),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': False,\n 'expectedErrors':\n {1: (set(), {2})\n }\n },\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': True,\n 'expectedErrors': {}\n },\n {'linko':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2, 5}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())]),\n 'labels': False,\n 'expectedResult': False,\n 'expectedErrors':\n {\n 'missing': {5},\n 5: ({0}, set())\n }\n },\n ]", "def test02(self):\n\t\tself.model = DomainModel('Test Domain Model',\n\t\t\tActor('Driver',\n\t\t\t\t\"\"\"Person who drives a car\"\"\",\n\t\t\t\tisA('Person')\n\t\t\t),\n\t\t\tActor('Passenger',\n\t\t\t\t\"\"\"Person who rides in a car\"\"\",\n\t\t\t\tisA('Person')\n\t\t\t),\n\t\t\tActor('Person',\n\t\t\t\t\"\"\"A human being, capable of driving and being driven\"\"\"\n\t\t\t),\n\t\t\tNoun('Car',\n\t\t\t\t\"\"\"A type of vehicle\"\"\",\n\t\t\t\thas('Driver','Passengers')\n\t\t\t)\n\t\t)" ]
[ "0.6820703", "0.6809778", "0.67368627", "0.66004425", "0.6538386", "0.6388511", "0.637721", "0.63673156", "0.63041615", "0.6233009", "0.6232004", "0.6227628", "0.61946553", "0.6189991", "0.61586624", "0.615512", "0.6111099", "0.60820246", "0.6068611", "0.60661596", "0.60507816", "0.6025865", "0.60231185", "0.6018186", "0.5991372", "0.5964736", "0.5960763", "0.59210336", "0.5920024", "0.5911228" ]
0.69270027
0
Method that executes the query to create a VISIT relationship
def createVisit(tx, query, personId, locationId, date, startHour, endHour): tx.run(query, personId=personId, locationId=locationId, date=date, startHour=startHour, endHour=endHour)
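createVisit is only the write-transaction callback; the caller that builds the VISIT MERGE query and invokes it (createRelationshipsVisit) is listed among the negatives below. For orientation, here is how the two fit together: the query string is copied from that snippet, while the driver setup and the concrete arguments are assumed placeholders.

from neo4j import GraphDatabase

visit_query = (
    "MATCH (p:Person) , (l:Location) "
    "WHERE ID(p) = $personId AND ID(l) = $locationId "
    "MERGE (p)-[:VISIT {date: date($date) , start_hour: time($startHour) , end_hour: time($endHour)}]->(l); "
)

# placeholder connection (assumption); node ids, date and hours are illustrative only
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
with driver.session() as s:
    s.write_transaction(createVisit, visit_query, 0, 3, "2021-04-12", "10:30", "11:15")
driver.close()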
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def createRelationshipsVisit(d, pIds, lIds):\n # Choose how many new visit relationships\n numberOfVisits = MAX_NUMBER_OF_VISIT\n\n for _ in range(0, numberOfVisits):\n lIndex = randint(0, len(lIds) - 1)\n locationId = lIds[lIndex]\n pIndex = randint(0, len(pIds) - 1)\n personId = pIds[pIndex]\n # Choose the hour/date\n\n date = datetime.date.today() - datetime.timedelta(days=randint(0, VISITS_DAYS_BACKS))\n date = date.strftime(\"%Y-%m-%d\")\n h = randint(0, 22)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n startHour = str(h) + \":\" + str(minutes)\n h = randint(h, 23)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n endHour = str(h) + \":\" + str(minutes)\n n = 0\n while not validateDate(d, date, personId, endHour) and n < MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:\n date = datetime.date.today() - datetime.timedelta(days=randint(0, 150))\n date = date.strftime(\"%Y-%m-%d\")\n h = randint(0, 22)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n startHour = str(h) + \":\" + str(minutes)\n h = randint(h, 23)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n endHour = str(h) + \":\" + str(minutes)\n n = n + 1\n if n == MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:\n continue\n query = (\n \"MATCH (p:Person) , (l:Location) \"\n \"WHERE ID(p) = $personId AND ID(l) = $locationId \"\n \"MERGE (p)-[:VISIT {date: date($date) , start_hour: time($startHour) , end_hour: time($endHour)}]->(l); \"\n )\n # Execute the query\n\n with d.session() as s:\n s.write_transaction(createVisit, query, personId, locationId, date, startHour, endHour)", "def createRelationshipsGetVaccine(d, pIds, vIds):\n # Choose how many new visit relationships\n numberOfVaccines = MAX_NUMBER_OF_VACCINE\n\n for _ in range(0, numberOfVaccines):\n vIndex = randint(0, len(vIds) - 1)\n vaccineId = vIds[vIndex]\n pIndex = randint(0, len(pIds) - 1)\n personId = pIds[pIndex]\n date = datetime.date.today() - datetime.timedelta(days=randint(0, VACCINES_DAYS_BACKS))\n country = \"Italy\"\n # For the future: maybe do a random country\n # Ask to neo4j server how many vaccines the user did\n query = (\n \"MATCH (p:Person)-[r]->(v:Vaccine) \"\n \"WHERE ID(p) = $personId AND type(r)='GET_VACCINE'\"\n \"RETURN count(p) as count,ID(v) as vaccineID,r.expirationDate as date\"\n )\n with d.session() as s:\n datas = s.read_transaction(gettingNumberVaccines, query, personId)\n\n # if no vaccines do one, else make the second vaccine\n if len(datas) == 0:\n string2 = str(date + datetime.timedelta(days=28)).split(\"-\")\n expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))\n\n else:\n if len(datas) == 1:\n string1 = str(datas[0][\"date\"]).split(\"-\")\n date = datetime.date(int(string1[0]), int(string1[1]), int(string1[2]))\n string2 = str(date + datetime.timedelta(days=365)).split(\"-\")\n expDate = datetime.date(int(string2[0]), int(string2[1]), int(string2[2]))\n\n vaccineId = datas[0][\"vaccineID\"]\n else:\n continue\n date = date.strftime(\"%Y-%m-%d\")\n expDate = expDate.strftime(\"%Y-%m-%d\")\n\n query = (\n \"MATCH (p:Person) , (v:Vaccine) \"\n \"WHERE ID(p) = $personId AND ID(v) = $vaccineId \"\n \"MERGE (p)-[:GET_VACCINE{date:date($date),country:$country,expirationDate:date($expDate)}]->(v); \"\n )\n\n # Execute the query\n with d.session() as s:\n s.write_transaction(createGettingVaccine, query, personId, vaccineId, date, country, expDate)", "def 
createRelationshipsInfect(id, test_date, test_hour, daysBack):\n familyQuery = (\n \"MATCH (pp:Person)-[:LIVE]->(h:House)<-[:LIVE]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND NOT (ip)<-[:COVID_EXPOSURE]-(pp)\"\n \"RETURN DISTINCT ID(ip);\"\n )\n\n \"\"\"\n IMPORTANT: ($date) represents the date from which we check the contacts. It is the date of positive test - 7 days\n We check all contacts until the date of positive test\n \"\"\"\n appContactQuery = (\n \"MATCH (pp:Person)-[r1:APP_CONTACT]->(ip:Person) \"\n \"WHERE ID(pp) = $id AND (r1.date > date($date) OR (r1.date = date($date) AND r1.hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.hour <= time($hour))) \"\n \"AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date;\"\n )\n locationContactQuery = (\n \"MATCH (pp:Person)-[r1:VISIT]->(l:Location)<-[r2:VISIT]-(ip:Person) \"\n \"WHERE ID(pp) = $id AND ip <> pp AND (r1.date > date($date) OR (r1.date = date($date) AND r1.start_hour >= time($hour))) \"\n \"AND (r1.date < date($date) + duration({days:7}) OR (r1.date = date($date)+duration({days:7}) AND \"\n \"r1.end_hour <= time($hour))) AND r2.date = r1.date AND \"\n \"((r1.start_hour < r2.start_hour AND r1.end_hour > r2.start_hour) OR \"\n \"(r2.start_hour < r1.start_hour AND r2.end_hour > r1.start_hour)) AND NOT \"\n \"(pp)-[:COVID_EXPOSURE{name: l.name , date: r1.date}]->(ip)\"\n \"RETURN DISTINCT ID(ip) , r1.date , l.name;\"\n )\n\n # date = datetime.date.today() - datetime.timedelta(daysBack)\n \"\"\"\n date is referred to date test - daysback \n \"\"\"\n date = test_date - datetime.timedelta(daysBack)\n infectedIds = []\n with driver.session() as s:\n familyInfected = s.read_transaction(findInfectInFamily, familyQuery, id)\n appInfected = s.read_transaction(findInfect, appContactQuery, id, date, test_hour)\n locationInfected = s.read_transaction(findInfect, locationContactQuery, id, date, test_hour)\n\n for el in familyInfected, appInfected, locationInfected:\n if len(el) > 0:\n # Take just the id\n infectedIds.append(el[0]['ID(ip)'])\n\n infectedIds = []\n for el in familyInfected:\n infectedIds.append(el['ID(ip)'])\n\n for infectedId in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date:date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectFamily, query, id, infectedId, date.strftime(\"%Y-%m-%d\"))\n\n infectedIds = []\n for el in appInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n infectedIds.append(details)\n\n for infectedId, infectedDate in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date)}]->(ip);\"\n )\n s.write_transaction(createInfectApp, query, id, infectedId, infectedDate)\n\n infectedIds = []\n\n for el in locationInfected:\n details = []\n details.append(el['ID(ip)'])\n details.append(el['r1.date'])\n details.append(el['l.name'])\n infectedIds.append(details)\n\n for infectedId, infectedDate, infectedPlace in infectedIds:\n query = (\n \"MATCH (pp:Person) , (ip:Person) \"\n \"WHERE ID(pp) = $id AND ID(ip) = $ipid \"\n \"CREATE (pp)-[:COVID_EXPOSURE{date: date($date) , name: $name}]->(ip);\"\n )\n s.write_transaction(createInfectLocation, query, id, infectedId, infectedDate, infectedPlace)", "def 
create_relation_to_episode(episode_id):\n epi = Episode.query.get(episode_id)\n if not epi:\n abort(404)\n\n\n data = request.json\n if any([\n 'id' in data and not isinstance(data.get('id'), int)\n ]):\n abort(400)\n\n dire = Director.query.get(data[\"id\"])\n if not dire:\n abort(404)\n\n epi.directors.append(dire)\n db.session.commit()\n return jsonify({'result': f\"{dire} directed episode {epi}\"})", "def findAllVisitRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:VISIT]->(n2:Location) \"\n \"RETURN ID(n1) , r , r.date , r.start_hour , r.end_hour , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def pluto_handler(self, pluto_entity):\n self.create_node()\n pluto_entity.create_unique_relationship('QUERIES', self.node,\n radd_fields=self.used_fields)", "def creates_view(self):\n return self.statements[0].creates_view()", "def compute_relations(self):\n\n visible_nodes = {}\n\n self.cameras = self.get_all_cameras()\n rospy.logdebug(self.cameras)\n\n if self.cameras.items():\n try:\n if self.visibility_monitor is None:\n self.visibility_monitor = VisibilityMonitor(self.ctx, self.source)\n rospy.loginfo(\"[perspective_filter] Visibility monitor now running, please active the Pygame windows.\")\n visible_nodes = self.visibility_monitor.compute_all()\n rospy.logdebug(\"[perspective_filter] %d perspectives computed \" % len(visible_nodes))\n #rospy.logdebug(visible_nodes)\n except Exception as e:\n rospy.logwarn(\"[perspective_filter] Exception occurred while computing relation : %s\" % str(e))\n if self.visibility_monitor:\n self.visible_nodes = {} #visible_nodes\n for camera_name, visibles_obj in visible_nodes.items():\n camera_id = self.source.scene.nodebyname(camera_name)[0].id\n self.visible_nodes[camera_id] = visibles_obj\n for node in visibles_obj:\n if node.parent in self.cameras.keys():\n if self.source.scene.nodes[node.parent] not in visibles_obj:\n visibles_obj.append(self.source.scene.nodes[node.parent])\n\n for agent_id, nodes_seen in self.visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_seen:\n if agent_id in self.previously_visible_nodes:\n if node not in self.previously_visible_nodes[agent_id]:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.start_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n for agent_id, nodes_previously_seen in self.previously_visible_nodes.items():\n agent = self.source.scene.nodes[agent_id]\n for node in nodes_previously_seen:\n if agent_id in self.visible_nodes:\n if node not in self.visible_nodes[agent_id]:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n else:\n self.end_predicate(self.source.timeline, \"isVisibleBy\", node.name, object_name=agent.name)\n\n self.publish_perspectives()\n self.previously_visible_nodes = self.visible_nodes", "def make_query(self):", "def get_query():\n return CiscoVlanIftableRelationshipQuery", "def test_launch_model_link_query_2(self):\n with app.test_request_context():\n launch1 = db.session.query(Launch).first()\n mission1 = launch1.mission\n self.assertEqual(mission1.name, \"Vostok 1\")", "def add_visit():\n\n # checks to see if user is logged in\n\n if session.get('username'):\n username = session['username']\n user = User.query.filter_by(username=username).first()\n\n # finds the friend searched for on the database\n friend = request.args.get(\"friend\")\n friend_user = 
User.query.filter_by(username=friend).first()\n\n when = request.args.get(\"when\")\n user_rating = Decimal(request.args.get(\"rating\"))\n\n # finds the restaurant's ID, adds the restaurant to the database if not in yet\n restaurant = request.args.get(\"name\")\n yelp_id = request.args.get(\"id\")\n avg_rating = request.args.get(\"avg_rating\")\n price_lvl = request.args.get(\"price\")\n review_count = request.args.get(\"rc\")\n categs = request.args.get(\"categs\")\n list_categs = categs.split(\",\")\n\n if not Restaurant.query.filter_by(name=restaurant).all():\n new_restaurant = Restaurant(yelp_id=yelp_id,\n name=restaurant,\n rating=avg_rating,\n price=turn_to_nums(price_lvl),\n review_count=review_count)\n db.session.add(new_restaurant)\n db.session.commit()\n\n rest_id = db.session.query(Restaurant.id).filter_by(yelp_id=yelp_id).first()[0]\n if not Category.query.filter_by(rest_id=rest_id).all():\n if len(list_categs) == 3:\n categ1, categ2, categ3 = list_categs\n elif len(list_categs) == 2:\n categ1, categ2 = list_categs\n categ3 = None\n else:\n categ1 = list_categs\n categ2 = None\n categ3 = None\n new_categs = Category(rest_id=rest_id,\n categ1=categ1,\n categ2=categ2,\n categ3=categ3)\n db.session.add(new_categs)\n db.session.commit()\n\n # Adding to the visits and uservisits tables\n new_visit = Visit(rest_id=rest_id, date=when)\n db.session.add(new_visit)\n db.session.commit()\n new_visit_id = db.session.query(Visit.id).filter_by(rest_id=rest_id,\n date=when).order_by(Visit.date.desc()).first()[0]\n new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=user.id,\n rating=user_rating)\n f_new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=friend_user.id)\n db.session.add(new_visit_exp)\n db.session.add(f_new_visit_exp)\n db.session.commit()\n return \" <span class='label label-success'>Saved!</span>\"\n\n # if not logged in, cannot save\n else:\n return \" <a href='/login'><span class='label label-default'>Login to save</span></a>\"", "def fetch_from_sqlite(self):\n conn = get_sqlite()\n c = conn.cursor()\n c.execute('SELECT * FROM vertices ORDER BY id')\n vertices =c.fetchall()\n c.execute('SELECT * FROM edges')\n edges =c.fetchall()\n conn.commit()\n\n self.graph.add_vertices(len(vertices))\n for one in vertices:\n id =int(one[0])\n self.graph.vs[id][\"name\"] = one[1]\n self.graph.vs[id][\"parent\"] = one[2]\n self.graph.vs[id][\"size\"] = one[3]\n self.graph.vs[id][\"last_modified\"] = one[4]\n self.graph.vs[id][\"last_accessed\"] = one[5]\n\n for one in edges:\n self.graph.add_edges([(one[0],one[1])])", "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()", "def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)", "def perform_create(self, serializer):\n lookup_url_kwarg = self.lookup_url_kwarg or 
self.lookup_field\n lookup = self.kwargs.get(lookup_url_kwarg, None)\n \n if lookup is not None:\n filter_kwargs = {self.lookup_field: lookup}\n video = get_object_or_404(Video, **filter_kwargs)\n clip = serializer.save(video=video, owner=self.request.user) \n \n # register this new activity\n activity_send(self.request.user, verb='add', object=clip, target=video)\n else:\n raise Http404", "def test_relation_way_inserted():\n park = query_row(db_conf, 'osm_landusages', -8001)\n assert park['type'] == 'park'\n assert park['name'] == 'rel 8001'\n assert query_row(db_conf, 'osm_roads', 8009)[\"type\"] == 'residential'", "def index(self):\n logging.getLogger(__name__).info('Indexing')\n cur = self._conn.cursor()\n cur.executescript(\"\"\"\n CREATE INDEX IF NOT EXISTS vertices_submitter_id ON vertices(submitter_id);\n CREATE UNIQUE INDEX IF NOT EXISTS edges_src_dst ON edges(src, dst, src_name, dst_name);\n CREATE INDEX IF NOT EXISTS edges_dst ON edges(dst);\n \"\"\")\n self._conn.commit()", "def test_agency_model_link_query_1(self):\n with app.test_request_context():\n agency1 = db.session.query(Agency).filter_by(name=\"SpaceX\").first()\n launch1 = agency1.launches[0]\n self.assertEqual(launch1.rocket, \"Saturn V\")", "def create_session(self):\n # TODO refactor bids_import pipeline to use same functions as dcm2bids below. To be done in different PR though\n if self.verbose:\n print(\"Creating visit \" + self.visit_label\n + \" for CandID \" + self.cand_id)\n\n column_names = ('CandID', 'Visit_label', 'CenterID', 'Current_stage')\n values = (self.cand_id, self.visit_label, str(self.center_id), 'Not Started')\n\n if self.project_id:\n column_names = column_names + ('ProjectID',)\n values = values + (str(self.project_id),)\n\n if self.cohort_id:\n column_names = column_names + ('CohortID',)\n values = values + (str(self.cohort_id),)\n\n self.db.insert(\n table_name='session',\n column_names=column_names,\n values=values\n )\n\n loris_session_info = self.get_session_info_from_loris()\n\n return loris_session_info", "def post(self, request, *args, **kwargs):\n role = self.get_object()\n signals.user_relation_added.send(sender=__name__,\n role=role, reason=None, request_user=request.user)\n serializer = self.get_serializer(role)\n return Response(serializer.data)", "def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)", "def visit_entity(self, entity):", "def save_pr(self):\r\n\t\tsql = \"drop table if exists pagelink\"\r\n\t\tself.cur.execute(sql)\r\n\t\tsql = \"create table pagelink(urlid integer, fromids text, toids text, pagerank real)\"\r\n\t\tself.cur.execute(sql)\r\n\t\tfor urlid in self.url_ids:\r\n\t\t\tfromids = ' '.join([str(v) for v in self.from_ids[urlid]])\r\n\t\t\ttoids = ' '.join([str(v) for v in self.to_ids[urlid]])\r\n\t\t\tsql = \"insert into pagelink values(%d,'%s','%s',%f)\" \\\r\n\t\t\t\t % (urlid, fromids, toids, self.all_scores[urlid])\r\n\t\t\tself.cur.execute(sql)\r\n\t\tself.conn.commit()", "def post(self, request, *args, **kwargs):\n frompath = urlparse(request.DATA.get('from_person')).path\n topath = urlparse(request.DATA.get('to_person')).path\n\n #print(request.DATA)\n if type(frompath) is str and type(topath) is str:\n frompath_elements = frompath.split('/')\n topath_elements = topath.split('/')\n else:\n return 
Response({'error: invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n fromPerson = get_object_or_404(Person, username=frompath_elements[-2])\n toPerson = get_object_or_404(Person, username=topath_elements[-2])\n count = Relationship.objects.filter(from_person=fromPerson, to_person=toPerson).count()\n\n #Reject a request to create Relationship with self\n if request.user.person.username == toPerson.username or count > 0:\n return Response({'error: Relationship with self not permitted'}, status=status.HTTP_400_BAD_REQUEST)\n\n if request.user.person.username == fromPerson.username or request.user.is_staff:\n return self.create(request, *args, **kwargs)\n return Response({'error': 'from_user does not match authenticated User'}, status=status.HTTP_400_BAD_REQUEST)", "def populate_graph(self):", "def cypher_create():\n graph.cypher.execute(\"CREATE (a:Person {name:{N}})\", {\"N\": \"yangyy\"})", "def post(self, *args, **kwargs):\n json_data = request.get_json()\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n\n if 'data' not in json_data:\n raise BadRequest('/data', 'You must provide data with a \"data\" route node')\n if isinstance(json_data['data'], dict):\n if 'type' not in json_data['data']:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in json_data['data']:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if json_data['data']['type'] != related_type_:\n raise InvalidType('/data/type', 'The type field does not match the resource type')\n if isinstance(json_data['data'], list):\n for obj in json_data['data']:\n if 'type' not in obj:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in obj:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if obj['type'] != related_type_:\n raise InvalidType('/data/type', 'The type provided does not match the resource type')\n\n self.before_post(args, kwargs, json_data=json_data)\n\n obj_, updated = self._data_layer.create_relationship(json_data,\n model_relationship_field,\n related_id_field,\n kwargs)\n\n qs = QSManager(request.args, self.schema)\n includes = qs.include\n if relationship_field not in qs.include:\n includes.append(relationship_field)\n schema = compute_schema(self.schema, dict(), qs, includes)\n\n if updated is False:\n return '', 204\n\n result = schema.dump(obj_).data\n if result.get('links', {}).get('self') is not None:\n result['links']['self'] = request.path\n self.after_post(result)\n return result, 200", "def createGettingVaccine(tx, query, personId, vaccineId, date, country, expDate):\n tx.run(query, personId=personId, vaccineId=vaccineId, date=date, country=country, expDate=expDate)" ]
[ "0.5515213", "0.5494762", "0.54500055", "0.52304965", "0.4951349", "0.4898447", "0.4882028", "0.4859343", "0.48130867", "0.48119572", "0.47934443", "0.47831184", "0.47791302", "0.47693402", "0.47674215", "0.47397813", "0.47124368", "0.46955422", "0.4681213", "0.46706474", "0.46690166", "0.4625133", "0.46205637", "0.46194923", "0.4618868", "0.4579102", "0.45677254", "0.45602193", "0.45589536", "0.45345297" ]
0.58031064
0
Method that finds all the positive persons
def findAllPositivePerson(): query = ( """ MATCH (p:Person)-[t:MAKE_TEST{result: \"Positive\"}]->() WHERE NOT EXISTS { MATCH (p)-[t2:MAKE_TEST{result: \"Negative\"}]->() WHERE t2.date > t.date } RETURN distinct ID(p) , t.date as infectionDate , t.hour as infectionHour """ ) positiveIdsFound = runQueryRead(driver, query) return positiveIdsFound
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_candidates(self) -> list:", "def _act_find_people(self, global_env):\n # if there are people in this node\n people_in_location = global_env.get_attr(self.location, \"people\")\n if people_in_location > 0:\n print(\"picked up: {} people\".format(people_in_location))\n # pickup people\n self.people_carried += people_in_location\n global_env.change_attr(self.location, \"people\", 0)\n self.change_state(\"find_shelter\")\n self.act(global_env)\n\n else:\n self._act_find(global_env, \"people\")", "def get_people(team):", "def _find_homeless_mps(self):\n mps = Person.objects.filter(\n active=True,\n house__name=HOUSE_OF_COMMONS,\n constituency=None,\n )\n\n self.stdout('MPs with missing constituency:')\n for mp in mps:\n self.stdout(f' [{mp.parliamentdotuk}] {mp.name} has no constituency')", "def get_people(self, letter = None):\n if letter:\n people = Person.objects.filter(member_of__entity__in = self.get_descendants(include_self = True), surname__istartswith = letter).distinct().order_by('surname', 'given_name', 'middle_names')\n else: \n people = Person.objects.filter(member_of__entity__in = self.get_descendants(include_self = True)).distinct().order_by('surname', 'given_name', 'middle_names')\n return people", "def get_all_positive_conversations(individual_dir=\"/bos/tmp10/hongqiay/mutual_pretrain/individual\"):\n print(\"Gathering positive conversations...\")\n all_positive_conversations = []\n ngram_counts = Counter()\n for fname in os.listdir(individual_dir):\n with open(os.path.join(individual_dir, fname), \"r\") as f:\n for l in f:\n conversation = json.loads(l)\n conversation_text = \"\".join(conversation)\n if conversation_text == \"\":\n continue\n tokens = nltk.word_tokenize(conversation_text)\n tri_tokens = trigrams(tokens)\n for trigram in set(tri_tokens):\n ngram_counts[trigram] += 1\n all_positive_conversations.append(conversation)\n min_count = ngram_counts.most_common()[-1][1]\n max_count = ngram_counts.most_common()[0][1]\n min_idf = math.log2(len(all_positive_conversations) / max_count)\n max_idf = math.log2(len(all_positive_conversations) / min_count)\n for k, v in ngram_counts.items():\n ngram_counts[k] = nidf(v, len(all_positive_conversations), min_idf, max_idf) # now stores nidf\n return all_positive_conversations, ngram_counts", "def test_no_duplicates_and_positives_in_negative_sample(self):\n model = PoincareModel(self.data_large, negative=3)\n positive_nodes = model.node_relations[0] # Positive nodes for node 0\n num_samples = 100 # Repeat experiment multiple times\n for i in range(num_samples):\n negatives = model._sample_negatives(0)\n self.assertFalse(positive_nodes & set(negatives))\n self.assertEqual(len(negatives), len(set(negatives)))", "def find_roots(people):\n filtered_people = set()\n for person in people:\n if not set(person.parents).intersection(people):\n filtered_people.add(person)\n return filtered_people", "def get_negatives(self):\n return (self.serie < 0).sum()", "def getPersons():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE NOT username = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempPersons = cur.fetchall()\n persons = []\n for person in tempPersons:\n persons.append(person[0])\n persons.sort()\n return persons", "def getNotMyCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() != 1):\n r.append(p)\n return r", "def find_all(self):", "def get_false_positives(detections, faces):\n false_positives = []\n for detection in detections:\n is_positive = False\n for face in 
faces:\n if intersection_ratio(detection, face) > 0.5:\n is_positive = True\n break\n if not is_positive:\n false_positives.append(detection)\n\n return false_positives", "def get_all_persons(self):\r\n return self.__person_repository.elements", "def getNeutralCamps(self):\n r = []\n for p in self.__camps:\n if(p.getOwner() == 0):\n r.append(p)\n return r", "def get_members_with_samples(self):\n # TODO: unit test me\n return sorted([x[\"patient\"] for x in self.get_filtered_pedigree_with_samples()])", "def get_members(self):\n return sorted([x[\"patient\"] for x in self.get_filtered_pedigree_with_samples()])", "def find_people(self, name=''):\n ## fixme -- can this query be combined?\n ## like this: db.inventory.find( { $or: [ { qty: { $lt: 20 } }, { sale: true } ] } )\n\n cursor = self.people.find({\"first_name\": {'$regex' : '.*' + name + '.*',\n '$options':'i'}})\n results = [Person.from_dict(p) for p in cursor]\n\n cursor = self.people.find({\"last_name\": {'$regex' : '.*' + name + '.*',\n '$options':'i'}})\n\n return results + [Person.from_dict(p) for p in cursor]", "def main():\n for num in range(372304, 847061):\n if meets_criteria2(num):\n print (num)\n\n meet_criteria = [meets_criteria2(x) for x in range(372304, 847061)]\n print (sum(meet_criteria))", "def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])", "def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]", "def all_manslaughter(x): \n for elem in x:\n if elem == 'Manslaughter' or elem == 'Voluntary Manslaughter':\n return 1\n return 0", "def _check_personal_pronouns(pronoun: str, last_nouns: list) -> list:\n pronoun_details = []\n pronoun_lower = pronoun.lower()\n if pronoun == 'I' or pronoun_lower in ('me', 'myself', 'my'):\n pronoun_details.append(('Narrator', 'SINGPERSON', ':Person', ':Narrator'))\n elif pronoun_lower in ('we', 'us', 'ourselves', 'our'):\n # Find singular or plural person nouns (any gender)\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, None, None, True))\n pronoun_details.append(('Narrator', 'SINGPERSON', ':Person', ':Narrator'))\n elif pronoun_lower in ('they', 'them', 'themselves', 'their'):\n # Give preference to persons (any gender or number)\n noun_list = _check_criteria(pronoun_lower, last_nouns, None, None, True)\n if noun_list:\n pronoun_details.extend(noun_list)\n else:\n # Check for non-persons\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, None, None, False))\n elif pronoun_lower in ('she', 'herself', 'her'):\n # Find singular, feminine, person nouns\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, True, True, True))\n elif pronoun_lower in ('he', 'himself', 'him'):\n # Find singular, masculine, person nouns\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, True, False, True))\n elif pronoun_lower in ('it', 'itself', 'its'):\n # Find singular, non-person nouns (no gender)\n pronoun_details.extend(_check_criteria(pronoun_lower, last_nouns, True, None, False))\n final_details = [] # May be duplicates in the list due to duplicates in last_nouns\n for pronoun_detail in pronoun_details:\n if pronoun_detail in final_details:\n continue\n final_details.append(pronoun_detail)\n return final_details", "def appears(self):", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def find_all(self):\n pass", "def college_selectivity():", "def findSuggestions():\n users = None\n if current_user.genderPreferences == 
\"any\":\n users = User.query.filter(or_(User.genderPreferences==current_user.gender, User.genderPreferences=='any'), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"male\":\n users = User.query.filter(or_(User.gender==\"male\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"female\":\n users = User.query.filter(or_(User.gender==\"female\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n show_users = []\n print(users)\n for user in users:\n if (not user in current_user.likes) and (not user in current_user.dislikes):\n show_users.append(user)\n print(show_users)\n return show_users", "def find_positive(self):\n if self.round == 2:\n pass\n \n elif self.subtested == 1:\n try:\n dim = self.D-1\n sample = range(1, int(self.poolSize)+1)\n self.SLICES = self.partRes\n dim_positive_slices = itemgetter(*self.results.keys())(self.results)\n dim_positive_slices_count = list(map(len,dim_positive_slices))\n one_pos_slice_count = dim_positive_slices_count.count(1)\n two_pos_slice_count = dim_positive_slices_count.count(2)\n three_pos_slice_count = dim_positive_slices_count.count(3)\n if one_pos_slice_count == dim:\n positive_slice_samples = [self.SLICES[keys][value] for keys in self.results.keys() for value in self.results[keys]]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n return set.intersection(*positive_slice_samples)\n \n elif (one_pos_slice_count == dim-1) and (two_pos_slice_count == 1 or three_pos_slice_count ==1):\n positive_slice_samples = [itemgetter(*self.results[key])(self.SLICES[key]) \n if len(self.results[key])==1 else set.union(*itemgetter(*self.results[key])(self.SLICES[key])) \n for key in self.results.keys()]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n\n else:\n self.positiveSamples.setText('Indeterministic')\n except:\n pass\n else:\n try:\n dim = self.D\n sample = range(1, int(self.poolSize)+1)\n self.SLICES = self.slicedCube\n dim_positive_slices = itemgetter(*self.results.keys())(self.results)\n dim_positive_slices_count = list(map(len,dim_positive_slices))\n one_pos_slice_count = dim_positive_slices_count.count(1)\n two_pos_slice_count = dim_positive_slices_count.count(2)\n three_pos_slice_count = dim_positive_slices_count.count(3)\n if one_pos_slice_count == dim:\n positive_slice_samples = [self.SLICES[keys][value] for keys in self.results.keys() for value in self.results[keys]]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n return set.intersection(*positive_slice_samples)\n \n elif (one_pos_slice_count == dim-1) and (two_pos_slice_count == 1 or three_pos_slice_count ==1):\n positive_slice_samples = [itemgetter(*self.results[key])(self.SLICES[key]) \n if len(self.results[key])==1 else set.union(*itemgetter(*self.results[key])(self.SLICES[key])) \n for key in self.results.keys()]\n self.positiveSamples.setText('; '.join(str(s) for s in set.intersection(*positive_slice_samples)))\n\n else:\n self.positiveSamples.setText('Indeterministic: \\n Proceed to sub- \\n directional 
testing')\n self.labelsCube = self.labelledCube()\n self.subTest()\n self.sliceSelect.clear()\n self.sliceSelect.addItems(self.res)\n if self.round == 1:\n self.round = 2\n else:\n self.round = 3\n except:\n pass", "def filter_positive_detections(detections):\n class_idx = 0\n assert(isinstance(detections, mx.nd.NDArray) or isinstance(detections, np.ndarray))\n detections_per_image = []\n # for each image\n for i in range(detections.shape[0]):\n result = []\n det = detections[i, :, :]\n for obj in det:\n if obj[class_idx] >= 0:\n result.append(obj)\n detections_per_image.append(result)\n logging.info(\"%d positive detections\", len(result))\n return detections_per_image" ]
[ "0.5554053", "0.5459844", "0.54469377", "0.5361949", "0.53258157", "0.52789694", "0.52775156", "0.5253702", "0.5251319", "0.5248255", "0.5231792", "0.5211129", "0.52076685", "0.5206079", "0.517138", "0.5147965", "0.51300406", "0.50793165", "0.5072078", "0.50638425", "0.50197804", "0.500946", "0.50052404", "0.5002412", "0.49952397", "0.49816376", "0.49768004", "0.49588287", "0.49501127", "0.49477434" ]
0.7193914
0
Method that deletes exposures for people who tested negative after a COVID exposure
def delete_negative_after_exposure(): query = ("match ()-[c:COVID_EXPOSURE]->(p)-[m:MAKE_TEST{result:\"Negative\"}]->(t) " "where m.date >= c.date + duration({days: 7}) " "delete c") with driver.session() as session: session.run(query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_exposure(self, expid):\n\n Exposure.objects.filter(exposure_id=expid).delete()", "def expense(self):\n del self._expense", "def test_expense_deletion(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n rv = self.client().post(\n '/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(rv.status_code, 201)\n res = self.client().delete('/expenses/1', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n # Test to see if it exists, should return a 404\n result = self.client().get('/expenses/1', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def remove_bad_experience(self):\n average = self.average()\n to_delete = []\n for i, t in enumerate(self.memory):\n if t.reward < average:\n to_delete.append(i)\n f = 0 \n for d in to_delete:\n del self.memory[d-f]\n f += 1", "def test_delete_risk_profile_using_delete(self):\n pass", "def delete_demo(exploration_id):\n exploration = get_exploration_by_id(exploration_id, strict=False)\n if not exploration:\n # This exploration does not exist, so it cannot be deleted.\n logging.info('Exploration with id %s was not deleted, because it '\n 'does not exist.' % exploration_id)\n else:\n delete_exploration(ADMIN_COMMITTER_ID, exploration_id)", "def test_client_risk_assessment_delete(self):\n pass", "def test_delete_assessment(self):\n response = self.user_02.delete(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.delete(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.delete(self.assessment_custom_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_delete_assessment(self):\n response = self.user_01.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.user_02.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.supervisor_formal.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n response = self.convener.delete(self.assessment_report_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def eye_cleanup(input_frame):\n f=input_frame\n f=f[f['facekeypressed']!='.']\n f=f[f['practice'].astype(int)>2]\n return f", "def test_delete_occurrence(self):\n pass", "def delete(damage_id):\n logged_in_user = g.user\n damage_id = str(damage_id)\n\n deleted_damage = libdamage.delete_damage(damage_id=damage_id, \n logged_in_user=logged_in_user)\n\n rci_id = deleted_damage['rci_id']\n\n return redirect(url_for('rci.edit', rci_id=rci_id))", "def test_remove_expensive(self):\n test_remove_expensive = self.info_list.remove_expensive()\n self.assertTrue(test_remove_expensive)", "def test_negative_conditions(self):\r\n outline_url = reverse_course_url('course_handler', self.course.id)\r\n # register a non-staff member and try to delete the course branch\r\n non_staff_client, _ = self.create_non_staff_authed_user_client()\r\n response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')\r\n self.assertEqual(response.status_code, 403)", "def cure(self, s):\n if self.disease_status == 1:\n s.number_of_symptomatic -= 1\n elif 
self.disease_status == 2:\n s.number_of_asymptomatic -= 1\n elif self.disease_status == 3:\n s.number_of_res_symp -= 1\n elif self.disease_status == 4:\n s.number_of_res_asymp -= 1\n if self.disease_status > 0:\n s.infected.remove(self.identifier)\n if self.disease_status > 2:\n s.resistant.remove(self.identifier)\n self.disease_status = 0\n self.time_since_infection = -1", "def treatInfections(self, amount, disease):\r\n if disease in self.city.diseaseCounts:\r\n self.city.diseaseCounts[disease] -= amount\r\n disease.addCubes(amount)", "def delete(self, expense_id):\n return DeleteExpense(current_user.id, expense_id)", "def delete_demos():\n for index in range(len(feconf.DEMO_EXPLORATIONS)):\n delete_demo(str(index))", "def clean_exam():\n data = Exam.objects.all()\n data.delete()", "def test_offensive_degenerate_case(self):\n from parlai.scripts.detect_offensive_language import DetectOffensive\n\n report = DetectOffensive.main(\n task='integration_tests:overfit', safety='all', mutators='degenerate'\n )\n assert report['classifier_offenses%'] == 0\n assert report['exs'] == 4", "def test_delete_experiment(client, users):\n login_experimenter(client)\n\n exp = ExperimentFactory()\n exp.save()\n\n exp_url = \"/experiments/\" + str(exp.id)\n\n response = client.delete(exp_url)\n assert response.status_code == 200\n assert json_success(response.data)\n\n response = client.get(\"/experiments/\")\n data = response.data.decode(response.charset)\n assert response.status_code == 200\n assert exp.name not in data", "def reject_test(self):\n self.__genes_test = None\n self.__fitness_test = None", "def test_impact_for_exp_with_no_answers(self):\n # Sign up a user and have them create an exploration.\n user_a_id = self._sign_up_user(\n self.USER_A_EMAIL, self.USER_A_USERNAME)\n exploration = self._create_exploration(self.EXP_ID_3, user_a_id)\n self._rate_exploration(exploration.id, 5, 3)\n self._run_computation()\n user_stats_model = user_models.UserStatsModel.get(user_a_id)\n self.assertEqual(user_stats_model.impact_score, 0)", "def delete(self, expense_id):\n url = base_url + expense_id\n resp = zoho_http_client.delete(url, self.details, self.headers)\n return parser.get_message(resp)", "def remove_favor(self):\n org = self.get_organization()\n target = self.caller.search(self.rhs)\n if not target:\n return\n try:\n rep = target.Dominion.reputations.get(organization=org)\n except Reputation.DoesNotExist:\n raise CommandError(\"They have no favor with %s.\" % org)\n rep.wipe_favor()\n self.msg(\"Favor for %s removed.\" % target)", "def delete_specimen(specimen_id):\n\n specimen = Specimen.query.get_or_404(specimen_id)\n\n if current_user.id == specimen.user_id:\n\n db.session.delete(specimen)\n db.session.commit()\n\n flash(\"Specimen deleted!\", \"success\")\n return redirect(f\"/user/{current_user.id}\")\n else:\n return (\"\", 403)", "def deleteFood(self,x,y): \n self.environment[x][y] = 0 # the value of the case change \n if self.display:\n self.can.delete(self.foodText[(x,y)]) # delete the food text from the simulators ", "def decrement_notice(self):\n\t\tassert not any(self.feat_time_left==0)\n\t\tself.feat_time_left[self.feat_time_left>0]-=1\n\t\tbool_feats_to_remove=(self.feat_time_left==0)\n\t\tself.remove_feats(bool_feats_to_remove)", "def _delete_cves(self):\n logger.info(\"Deletion of data begins\".center(50, '-'))\n dry_run = self.helper.is_dry_run()\n if dry_run:\n logger.info(\"Dry run mode is on. 
No ingestion will take place\".center(30, '-'))\n del_obj = {'id': ''}\n for eco in SUPPORTED_ECOSYSTEMS:\n if eco in self.DELETE_CVE_DATA:\n logger.info(\"Deleting false positive {} CVEs...\".format(eco))\n if len(self.DELETE_CVE_DATA[eco]) > 0:\n for vuln in self.DELETE_CVE_DATA[eco]:\n logger.info(\"Deleting {}\".format(vuln['id']))\n del_obj['id'] = vuln['id']\n if not dry_run:\n resp = self.helper.make_api_call(del_obj, 'DELETE')\n self.SNYK_REPORT['details'][eco]['delete'][vuln['id']]['status'] = resp\n logger.info(\"Waiting for 1 second\".center(30, '-'))\n time.sleep(1)\n else:\n logger.info(\"Nothing to delete for {} CVEs...\".format(eco))\n else:\n logger.info(\"Nothing to delete for {} CVEs...\".format(eco))\n logger.info(\"Deletion of data ends\".center(50, '-'))\n return True", "def remove_unimproved_species(self):\n for spec_num, spec in list(self.species.items()):\n if self.gen_num - spec.gen_last_improved > self.species_dropoff_age:\n self.species.pop(spec_num)" ]
[ "0.6750642", "0.5988316", "0.592803", "0.58590585", "0.58475167", "0.58293736", "0.5802172", "0.5726755", "0.57198924", "0.56072134", "0.5561569", "0.543649", "0.54000187", "0.53796333", "0.5374287", "0.537308", "0.5327827", "0.5325508", "0.5318972", "0.5311366", "0.53023356", "0.5301029", "0.52723765", "0.52583486", "0.5246377", "0.5240229", "0.52279085", "0.52174664", "0.51896423", "0.518704" ]
0.7527944
0
Method that executes the query to find the infected members of a family
def findInfectInFamily(tx, query, id): result = tx.run(query, id=id).data() return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def family(self):", "def load_family_members():\n\n Member.query.delete()\n\n for row in open('data/seed_data_sample_plain'):\n strip_row = row.strip()\n split_row = strip_row.split('|')\n\n member_id = split_row[0].strip()\n\n first_name = split_row[1].strip()\n\n last_name = split_row[2].strip()\n\n if split_row[3].strip() is not None:\n eng_title = split_row[3].strip()\n else:\n eng_title = None\n\n if split_row[4].strip() is not None:\n alt_name = split_row[4].strip()\n else:\n alt_name = None\n\n if split_row[5].strip() is not None:\n lineage = split_row[5].strip()\n else:\n lineage = None\n\n if split_row[6].strip() == 1:\n deceased = split_row[6].strip()\n else:\n deceased = 0\n\n if split_row[7].strip() is not None:\n image_url = split_row[7].strip()\n else:\n image_url = None\n\n if split_row[8].strip() is not None:\n parents = split_row[8].strip()\n else:\n parents = None\n\n if split_row[9].strip() is not None:\n string_list_of_child_member_ids = split_row[9].strip() # produces a string\n list_of_child_member_ids = string_list_of_child_member_ids.split() # produces a list from the string\n\n children = [Member(member_id=int(num)) for num in list_of_child_member_ids]\n else:\n children = None\n\n if split_row[10].strip() is not None:\n string_list_of_spouse_member_ids = split_row[10].strip()\n list_of_spouse_member_ids = string_list_of_spouse_member_ids.split()\n\n spouse = [Member(member_id=int(num)) for num in list_of_spouse_member_ids]\n else:\n spouse = None\n\n try:\n member = Member(member_id=member_id,\n first_name=first_name,\n last_name=last_name,\n eng_title=eng_title,\n alt_name=alt_name,\n lineage=lineage,\n deceased=deceased,\n image_url=image_url,\n parents=parents,\n children=children,\n spouse=spouse)\n\n db.session.add(member)\n except:\n import pdb; pdb.set_trace()\n\n db.session.commit()", "def _possible_family_meeting(self, reference, family):\n dbstate = self.dbstate\n try:\n person = dbstate.db.get_person_from_handle(reference.get_father_handle())\n except:\n return\n if person is None: # family without father ?\n person = dbstate.db.get_person_from_handle(reference.get_mother_handle())\n if person is None:\n person = dbstate.db.get_person_from_handle(self.uistate.get_active('Person'))\n if person is not None:\n family_list = person.get_family_handle_list()\n if len(family_list) > 0:\n fhandle = family_list[0] # first is primary\n fam = dbstate.db.get_family_from_handle(fhandle)\n handle = fam.get_father_handle()\n father = dbstate.db.get_person_from_handle(handle)\n if father:\n self._expose_persone_to_family(father, family)\n handle = fam.get_mother_handle()\n mother = dbstate.db.get_person_from_handle(handle)\n if mother:\n self._expose_persone_to_family(mother, family)\n child_ref_list = fam.get_child_ref_list()\n if child_ref_list:\n for child_ref in child_ref_list:\n child = dbstate.db.get_person_from_handle(child_ref.ref)\n if child:\n self._expose_persone_to_family(child, family)\n else:\n self._expose_persone_to_family(person, family)", "async def examine(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n search_string = ' '.join(args).lower()\n for member in ctx.guild.members:\n if member.nick is not None:\n if search_string in member.nick.lower():\n target = User.objects.get(id=member.id)\n break\n if search_string in member.name.lower():\n target = User.objects.get(id=member.id)\n break\n else:\n await ctx.send(f'Could not find {search_string} in server.')\n return\n\n await ctx.send(users.print_account(target))", 
"def find_member(message, nickname):\n for member in message.guild.members:\n if nickname in member.display_name:\n return member", "def display_family(self, family):\n for child_ref in family.get_child_ref_list():\n child = self.dbstate.db.get_person_from_handle(child_ref.ref)\n self.add_child(child)\n self.set_has_data(self.model.count > 0)", "def selectFamily(self, obj):\n self.track = []\n self.skip_list = []\n self.ref_family = None\n self.reffamily_bookmark = None\n selectFamily = SelectorFactory('Family')\n sel = selectFamily(self.dbstate, self.uistate)\n self.reffamily = sel.run()\n self.goto_handle(None)", "def get_family_by_id(family_id):\n family = User.es.search(family_id)\n return family", "def displayFolowers(database):\n firstname=str(input(\"who do you want to display followers :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(f\"{usr.firstname} {usr.lastname} is folowed by:\")\n for folower in usr.folowed:\n print(folower)", "async def inf_search(self, ctx,\n\t\ttarget: Union[BanCandidateConverter, int]\n\t):\n\n\t\tMAX = 10\n\t\tTS_FORMAT = \"%d/%m/%y %H:%M:%S\"\n\n\t\tasync with self.bot.postgres.acquire() as con:\n\t\t\tif isinstance(target, (discord.Member, discord.User)):\n\t\t\t\tsearch_id = target.id\n\t\t\t\tquery = \"\"\"SELECT id, target_id, actor_id, type_id, reason, created_at FROM infractions\n\t\t\t\t\t\t WHERE (target_id = $1 OR actor_id = $1) AND guild_id = $2\n\t\t\t\t\t\t ORDER BY id DESC;\"\"\"\n\n\t\t\telse:\n\t\t\t\tsearch_id = target\n\t\t\t\tquery = \"\"\"SELECT id, target_id, actor_id, type_id, reason, created_at FROM infractions\n\t\t\t\t\t\t WHERE id = $1 AND guild_id = $2;\"\"\"\n\n\t\t\tcases = await con.fetch(query, search_id, ctx.guild.id)\n\n\t\t\tif not cases:\n\t\t\t\traise LookupFailed(\"infractions\")\n\n\t\t\ttype_str = {\n\t\t\t\t0: \"ban\",\n\t\t\t\t1: \"kick\",\n\t\t\t\t2: \"mute\",\n\t\t\t\t3: \"warn\"\n\t\t\t}\n\n\t\t\tawait ctx.send(dedent(f\"\"\"🔎 Showing {len(cases[:MAX])}/{len(cases)} found infractions:\n\t\t\t\t```md\n\t\t\t\t{tabulate(\n\t\t\t\t\ttabular_data=[\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\tinfraction[\"id\"],\n\t\t\t\t\t\t\tclean_user(ctx.guild, infraction[\"target_id\"]),\n\t\t\t\t\t\t\tclean_user(ctx.guild, infraction[\"actor_id\"]),\n\t\t\t\t\t\t\ttype_str.get(infraction[\"type_id\"]),\n\t\t\t\t\t\t\tinfraction[\"reason\"],\n\t\t\t\t\t\t\tinfraction[\"created_at\"].strftime(TS_FORMAT)\n\t\t\t\t\t\t] for infraction in cases[:MAX]\n\t\t\t\t\t],\n\t\t\t\t\theaders=(\"ID\", \"Target\", \"Actor\", \"Type\", \"Reason\", \"Timestamp\"),\n\t\t\t\t\ttablefmt=\"simple\",\n\t\t\t\t\tnumalign=\"left\", \n\t\t\t\t\tstralign=\"left\"\n\t\t\t\t)}```\"\"\"))", "def createInfectFamily(tx, query, id, ipid, date):\n tx.run(query, id=id, ipid=ipid, date=date)", "def _expose_persone_to_family(self, ref_person, family):\n dbstate = self.dbstate\n try:\n person = dbstate.db.get_person_from_handle(family.get_father_handle())\n except:\n return\n if person is None: # family without father ?\n person = dbstate.db.get_person_from_handle(family.get_mother_handle())\n if person is not None:\n family_list = person.get_family_handle_list()\n if len(family_list) > 0:\n fhandle = family_list[0] # first is primary\n fam = dbstate.db.get_family_from_handle(fhandle)\n handle = fam.get_father_handle()\n father = dbstate.db.get_person_from_handle(handle)\n if father:\n self.possible_meeting(father, ref_person)\n handle = fam.get_mother_handle()\n mother = dbstate.db.get_person_from_handle(handle)\n if mother:\n self.possible_meeting(mother, 
ref_person)\n child_ref_list = fam.get_child_ref_list()\n if child_ref_list:\n for child_ref in child_ref_list:\n child = dbstate.db.get_person_from_handle(child_ref.ref)\n if child:\n self.possible_meeting(child, ref_person)\n else:\n self.possible_meeting(person, ref_person)", "def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)", "def find_member(self, search_str: str) -> 'dt_member.Member':\n sp = search_str.rsplit(\"#\", 1)\n if len(sp) == 1:\n # Member name only :(\n predicate = lambda member: member.user.name == sp[0] or member.nickname == sp[0]\n else:\n # Discriminator too!\n # Don't check nicknames for this.\n predicate = lambda member: member.user.name == sp[0] \\\n and member.user.discriminator == sp[1]\n\n filtered = filter(predicate, self.members.values())\n return next(filtered, None)", "def _act_find_people(self, global_env):\n # if there are people in this node\n people_in_location = global_env.get_attr(self.location, \"people\")\n if people_in_location > 0:\n print(\"picked up: {} people\".format(people_in_location))\n # pickup people\n self.people_carried += people_in_location\n global_env.change_attr(self.location, \"people\", 0)\n self.change_state(\"find_shelter\")\n self.act(global_env)\n\n else:\n self._act_find(global_env, \"people\")", "async def guild_infected(self, ctx, *, guild: discord.Guild = None):\n if not guild:\n guild = ctx.guild\n user_list = await self.config.all_users()\n infected_list = []\n for user, data in user_list.items():\n user = guild.get_member(user)\n if user:\n userState = data[\"gameState\"]\n if userState == \"infected\":\n infected_list.append(f\"{user.mention} - {user}\")\n if infected_list:\n infected_list = \"\\n\".join(infected_list)\n color = await ctx.embed_color()\n if len(infected_list) > 2000:\n embeds = []\n infected_pages = list(pagify(infected_list))\n for index, page in enumerate(infected_pages, start=1):\n embed = discord.Embed(color=color, title=\"Infected Members\", description=page)\n embed.set_footer(text=f\"{index}/{len(infected_pages)}\")\n embeds.append(embed)\n await menu(ctx, embeds, DEFAULT_CONTROLS)\n else:\n await ctx.send(\n embed=discord.Embed(\n color=color,\n title=\"Infected Members\",\n description=infected_list,\n )\n )\n else:\n await ctx.send(\"No one has been infected yet..\")", "def search_for_member(self, *, name: str = None, discriminator: str = None,\n full_name: str = None):\n if full_name is not None:\n sp = full_name.split(\"#\", 1)\n return self.search_for_member(name=sp[0], discriminator=sp[1])\n\n # coerce into a proper string\n if isinstance(discriminator, int):\n discriminator = \"{:04d}\".format(discriminator)\n\n for member in self._members.values():\n # ensure discrim matches first\n if discriminator is not None and discriminator != member.user.discriminator:\n continue\n\n if member.user.username == name:\n return member\n\n if member.nickname == name:\n return member", "def friend(tcp, udp, userId, data):\n\n # from server get address of potential friend\n tcp.sendMessage('SEARCH ' + data[0])\n address = tcp.receiveMessage().split()[-2:]\n address = (address[0], int(address[1]))\n\n # send friend request\n if address:\n udp.sendto('FRIEND ' + userId, address)\n print 'Sent friend request to ' + data[0]\n else: print 'Could not send friend request to ' + data[0]", "def get_family_by_aadhar(aadhar_no):\n users_list = User.es.search(aadhar_no)\n user_list = []\n for user in users_list:\n 
user_family_no = user['family_id']\n user_list = User.es.search(user_family_no)\n return user_list", "def show_families():\n print(f\"--- {request}\")\n print(f\"--- {user_session}\")\n # Set context by owner and the data selections\n u_context = UserContext(user_session, current_user, request)\n # Which range of data is shown\n u_context.set_scope_from_request(request, \"person_scope\")\n opt = request.args.get(\"o\", \"father\", type=str)\n u_context.order = \"man\" if opt == \"father\" else \"wife\"\n u_context.count = request.args.get(\"c\", 100, type=int)\n t0 = time.time()\n\n with FamilyReader(\"read\", u_context) as service:\n # 'families' has Family objects\n families = service.get_families(opt)\n\n stk_logger(u_context, f\"-> bp.scene.routes.show_families/{opt} n={len(families)}\")\n return render_template(\n \"/scene/families.html\",\n families=families,\n user_context=u_context,\n elapsed=time.time() - t0,\n )", "def getMember(unique_name):", "def getMember(unique_name):", "def test_02_GetFamily(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_2\n l_family = FamUtil.get_family(self.m_device_obj)\n # print(PrettyFormatAny.form(l_family, 'B3-02-A - Family'))\n self.assertEqual(l_family, TESTING_FAMILY_NAME_2)", "def info(self, membership, callback=None):", "def family(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"family\")", "def test_01_GetFamily(self):\n self.m_device_obj.DeviceFamily = TESTING_FAMILY_NAME_1\n l_family = FamUtil.get_family(self.m_device_obj)\n # print(PrettyFormatAny.form(l_family, 'B3-01-A - Family'))\n self.assertEqual(l_family, TESTING_FAMILY_NAME_1)", "def lookup_member(email, verbose=0):\n\n return lookup_osf(\n email=email, group_slug='foundation-members', verbose=verbose)", "def find_new_people(self):\n #greets people, only greets once while they're in the camera's view and are center of attention\n\n\n if (self.person is not None) and (self.person.acknowledged == False):\n self.person.acknowledged = True\n print \"I see you!\"\n self.idle_pub.publish(\"idle:stop\")\n time.sleep(2)\n\n greeting = [\"R_nudge\",\"R_look\"]\n for msg in greeting:\n self.behavior_pub.publish(msg)\n self.check_completion()\n\n\n self.detection_pub.publish('found')\n\n elif self.person is None:\n print \"I don't see you\"\n self.detection_pub.publish('nothing')", "def find_family(self, needle):\n return self.__make_api_call('find/family/{}'.format(needle))", "def searchByInterest(database):\n interest=str(input(\"What is his interest :\"))\n usrs,find=getByInterest(database,interest)\n for usr in usrs:\n print(usr)" ]
[ "0.5857511", "0.56663436", "0.5661443", "0.5598044", "0.54980147", "0.5473072", "0.53622675", "0.53591436", "0.52720773", "0.5226901", "0.52110434", "0.52074486", "0.51369727", "0.5127439", "0.51245797", "0.50654334", "0.50646216", "0.5057014", "0.5038735", "0.50198334", "0.5014983", "0.5014983", "0.50054836", "0.499116", "0.49847615", "0.49695623", "0.49572158", "0.49249834", "0.49058586", "0.49047554" ]
0.6700535
0
Method that retrieves all the IDs of the Person nodes
def getPersonIds(withApp=False): with driver.session() as s: ids = s.write_transaction(getPersonId, withApp) pIds = [] for idEl in ids: pIds.append(idEl["ID(p)"]) return pIds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_ids(self):\r\n return self.__person_repository.get_all_ids()", "def findAllPerson(tx):\n query = (\n \"MATCH (p:Person) \"\n \"RETURN p , ID(p);\"\n )\n results = tx.run(query).data()\n return results", "def get_person_ids(self) -> np.ndarray:\n return self.person_ids", "def getIDs():", "def getPersonId(tx, withApp):\n if not withApp:\n query = (\n \"MATCH (p:Person) \"\n \"RETURN ID(p);\"\n )\n else:\n query = (\n \"MATCH (p:Person) \"\n \"WHERE p.app = \\\"True\\\" \"\n \"RETURN ID(p);\"\n )\n\n idsList = tx.run(query).data()\n return idsList", "def get_ids(self) -> List[str]:", "def node_ids(self):\n return [self.node_id]", "def get_node_ids(self):\n \n return self.node_ids", "def _node_ids(self, nodes=None): # this function comes from BaseCard.py\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n node_ids = [node for node in nodes]\n else:\n node_ids = [node.nid for node in nodes]\n assert 0 not in node_ids, 'node_ids = %s' % (node_ids)\n return node_ids", "def get_all_persons(self):\r\n return self.__person_repository.elements", "def all_ids(self) -> Set[int]:\n return {node_id for _, (node_id, _) in self.nodes.items()}", "def _node_ids(self, nodes=None):\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n return nodes\n return [node.nid for node in nodes]", "def get_plate_ids_from_node_ids(self, nodes: list[int]) -> list[int]:\n ids = []\n\n for k, v in vars(self).items():\n if v.nodes == nodes:\n ids.append(k)\n\n if len(ids) == 0:\n ids = None\n\n return ids", "def getIds(self) -> List[int]:\n return list(self.users.keys())", "def get_people(self):\n cursor = self.cur()\n cursor.execute('SELECT * FROM {tn} '.format(tn=\"person\"))\n all_people = cursor.fetchall()\n return all_people", "def get_persons(self):\n return self.person_list.model().get_person_list()", "def ids(self):\n return self.obj_to_id.values()", "def ids(self):\n return list(self._id_generator())", "def ids(self):\n return list(self._id_generator())", "def get_ids(self):\n return self._ids", "def get_node_ids(self, node_id):\n if node_id is not None:\n names = [node_id]\n else:\n names = []\n query_url = ('/api/node/class/fabricNode.json?'\n 'query-target-filter=eq(fabricNode.role,\"leaf\")')\n error_message = 'Could not get switch list from APIC.'\n nodes = self._get_query(query_url, error_message)\n for node in nodes:\n names.append(str(node['fabricNode']['attributes']['id']))\n return names", "def get_id_users(self):\n return self.execute(TABELLE['id_users']['select']['all'])", "def get_ids(self):\n return [item.id for item in self.items]", "def list_people():\n\n person_list = []\n for person in person_database:\n person_list.append(person)\n return person_list", "def identities(self, generator=False, **kwargs):\n g = self._iter(body=self._identities_iter(), **kwargs)\n if generator:\n return g\n\n return list(g)", "def get_uuids_in_node(self, node, project_id):\n program, project = project_id.split(\"-\", 1)\n\n try:\n res = self.paginate_query(node, project_id)\n uuids = [x[\"id\"] for x in res[\"data\"][node]]\n except:\n raise Gen3Error(\n \"Failed to get UUIDs in node '\"\n + node\n + \"' of project '\"\n + project_id\n + \"'.\"\n )\n\n return uuids", "def ids(self):\n return (x[\"_id\"] for x in self.document._meta.collection.find(self.spec, fields = (\"_id\",)))", "def ids(self):\n return self._ids", "def dump_trusted_identities():\n\n node_name = \"ala\"\n\n numberSubnodes = ens.numberSubnodes(node_name)\n id_list = []\n\n # 
Iterate for each node\n for i in range(numberSubnodes):\n\n # Get the subnode (in name_hash format)\n subnode_hash = ens.subnode(node_name, i)\n\n # Get the data for the subnode\n DID, name, DIDDocument, active = resolver.AlaDIDPublicEntity(\n node_hash=subnode_hash)\n\n identity = {\n \"DID\": DID,\n \"name\": name,\n \"node_hash\": subnode_hash.hex()\n }\n id_list.append(identity)\n \n return id_list", "def _get_matching_node_ids(self, node_name):\n try:\n with closing(self.connection) as con:\n with con:\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT id\n FROM nodes\n WHERE name LIKE (?)\n \"\"\", (node_name,))\n res = cursor.fetchall()\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: An error occurred when retrieving node ids: {}\".format(e))\n\n if len(res) == 0:\n print(\"ERROR: Could not find node ID for name '{0}'.\".format(node_name))\n return []\n\n elif len(res) > 1:\n print(\"Found multiple node IDs for name '{0}', returning first result.\".format(node_name))\n\n # e.g. [(10,), (11,)] => [10, 11]\n return [x[0] for x in res]" ]
[ "0.7406652", "0.72724056", "0.7271821", "0.7182018", "0.69669974", "0.6920127", "0.66888916", "0.6681467", "0.65701073", "0.65534806", "0.6541458", "0.635309", "0.62999123", "0.6280396", "0.6258627", "0.6188425", "0.61552095", "0.6140766", "0.6140766", "0.61280906", "0.60750425", "0.6074702", "0.60668343", "0.60665363", "0.6061329", "0.6045357", "0.60004324", "0.5986576", "0.5981687", "0.59774476" ]
0.75039303
0
Method that retrieves all the IDs of the Location nodes
def getLocationsIds(): with driver.session() as s: ids = s.write_transaction(getLocationsId) lIds = [] for idEl in ids: lIds.append(idEl["ID(l)"]) return lIds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLocationsId(tx):\n query = (\n \"MATCH (l:Location)\"\n \"RETURN ID(l)\"\n )\n\n idsList = tx.run(query).data()\n return idsList", "def findAllLocation(tx):\n query = (\n \"MATCH (l:Location) \"\n \"RETURN l , ID(l);\"\n )\n results = tx.run(query).data()\n return results", "def getIDs():", "def get_node_ids(self):\n \n return self.node_ids", "def node_ids(self):\n return [self.node_id]", "def get_all_locations(self):", "def get_ids(self) -> List[str]:", "def get_locations_by_ids(self, id_list):", "def all_ids(self) -> Set[int]:\n return {node_id for _, (node_id, _) in self.nodes.items()}", "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def _node_ids(self, nodes=None): # this function comes from BaseCard.py\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n node_ids = [node for node in nodes]\n else:\n node_ids = [node.nid for node in nodes]\n assert 0 not in node_ids, 'node_ids = %s' % (node_ids)\n return node_ids", "def get_plate_ids_from_node_ids(self, nodes: list[int]) -> list[int]:\n ids = []\n\n for k, v in vars(self).items():\n if v.nodes == nodes:\n ids.append(k)\n\n if len(ids) == 0:\n ids = None\n\n return ids", "def get_all_location_ids_from_tcp_relay():\n # tcprelay has a default 5s timeout, but enumerates quickly\n # Run in the background and kill after 0.1s to avoid this\n cmd = \"/usr/local/bin/tcprelay --list & sleep 0.1; kill $!\"\n output = subprocess.check_output(cmd, shell=True)\n location_ids = re.findall(r\"Location:\\s*([0-9A-Fa-f]+)\", output)\n\n # convert hex string in location_ids to integer\n def hex_to_int(x): return int(x, 16)\n\n return list(map(hex_to_int, location_ids))", "def get_es_ids(self):\n search = self.search.source(['uri']).sort(['uri'])\n es_ids = [item.meta.id for item in search.scan()]\n return es_ids", "def get_ids(self):\n return self._ids", "def locations(self):\n return self.data.get(\"locations\", [])", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. 
if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def get_node_list(self):\n logger.debug('Updating node list')\n self.subscribe_mqtt('/nodes/+/responses/ping')\n self.node_ids = []\n\n def on_response(payload, data):\n if data and data.get('node', None):\n node_id = data['node']\n logger.debug('Found node with ID \"%s\"' % node_id)\n\n if node_id not in self.node_ids:\n self.node_ids.append(node_id)\n\n return False\n\n self.publish_mqtt('/ping', on_response=on_response)\n time.sleep(self.timeout / 1000)\n\n return self.node_ids", "def get_locations(self, id_):\n with self._db_connection() as connection:\n return connection.get_locations(id_)", "def get_ids(self):\n return self._graphs.keys()", "def _MocaCtlGetNodeIDs(self):\n mc = subprocess.Popen([MOCACTL, 'showtbl', '--nodestats'],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n nodes = set()\n for line in out.splitlines():\n node = NODE_RE.search(line)\n if node is not None:\n nodes.add(int(node.group(1)))\n node_list = list(nodes)\n length = len(node_list)\n if int(self.AssociatedDeviceCount) != length:\n type(self).AssociatedDeviceCount.Set(self, length)\n return node_list", "def all_node_ids(self):\n return [i for i in range(0, self.n_inputs + self.n_hidden + self.n_outputs)]", "def get_routes():\n\n return Db().get_line_ids()", "def ids(self):\n return self._ids", "def list_locations(self, _id):\n \n self.options['usr_locator_id'] = _id\n self.options['action'] = 'locator.location.list'\n return self.call(self.options)", "def _node_ids(self, nodes=None):\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n return nodes\n return [node.nid for node in nodes]", "def get_possible_ids(self):\n ids = []\n\n dest_data = requests.get(\"https://api.wdpro.disney.go.com/facility-service/destinations/{}\".format(self.__anc_dest_id), headers=getHeaders()).json()\n data = requests.get(dest_data['links']['entertainmentVenues']['href'], headers=getHeaders()).json()\n\n for entry in data['entries']:\n try:\n ids.append(entry['links']['self']['href'].split('/')[-1].split('?')[0])\n except:\n pass\n\n return ids", "def get_locations_recursively(self, location_id):\n locations = []\n location = Location.objects.get(pk=location_id)\n if location.parent_location is not None:\n parent_id = location.parent_location.id\n locations += self.get_locations_recursively(parent_id)\n locations.append('/api/v1/location/{0}/'.format(location.id))\n return locations\n\n else:\n return ['/api/v1/location/{0}/'.format(location.id)]", "def get_ids(self):\n return [item.id for item in self.items]", "def getIDs(self):\n return self.multiengine.getIDs()" ]
[ "0.7874536", "0.731527", "0.7062646", "0.7016814", "0.7001121", "0.6844436", "0.6793107", "0.67177343", "0.6639855", "0.6492043", "0.63570356", "0.6255016", "0.6224796", "0.6186398", "0.61848336", "0.6168439", "0.61572087", "0.613083", "0.6110458", "0.6097127", "0.6097022", "0.60904723", "0.60896564", "0.6067964", "0.6064341", "0.6060444", "0.605844", "0.60576653", "0.60372525", "0.5998177" ]
0.7720268
1
Method that retrieves all the IDs of the Test nodes
def getTestsIds(): with driver.session() as s: ids = s.write_transaction(getTestsId) tIds = [] for idEl in ids: tIds.append(idEl["ID(t)"]) return tIds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTestsId(tx):\n query = (\n \"MATCH (t:Test)\"\n \"RETURN ID(t)\"\n )\n\n idsList = tx.run(query).data()\n return idsList", "def getIDs():", "def get_node_ids(self):\n \n return self.node_ids", "def node_ids(self):\n return [self.node_id]", "def get_ids(self) -> List[str]:", "def _node_ids(self, nodes=None): # this function comes from BaseCard.py\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n node_ids = [node for node in nodes]\n else:\n node_ids = [node.nid for node in nodes]\n assert 0 not in node_ids, 'node_ids = %s' % (node_ids)\n return node_ids", "def all_ids(self) -> Set[int]:\n return {node_id for _, (node_id, _) in self.nodes.items()}", "def get_nids(self, nodes):\n nids = []\n\n for node in nodes.values():\n try:\n hostname = Conf.get(self._index, f'cluster>{node}>hostname')\n except:\n raise MotrError(errno.EINVAL, f\"{node} hostname not found\")\n\n check_type(hostname, str, \"hostname\")\n\n if self._server_id == node:\n cmd = \"lctl list_nids\"\n else:\n cmd = (f\"ssh -o \\\"StrictHostKeyChecking=no\\\" {hostname}\"\n \" lctl list_nids\")\n op = execute_command(self, cmd)\n nids.append(op[0].rstrip(\"\\n\"))\n\n return nids", "def get_node_ids(self, node_id):\n if node_id is not None:\n names = [node_id]\n else:\n names = []\n query_url = ('/api/node/class/fabricNode.json?'\n 'query-target-filter=eq(fabricNode.role,\"leaf\")')\n error_message = 'Could not get switch list from APIC.'\n nodes = self._get_query(query_url, error_message)\n for node in nodes:\n names.append(str(node['fabricNode']['attributes']['id']))\n return names", "def get_ids(self):\n page = r.get(self.url)\n tree = html.fromstring(page.content)\n ids_elements = tree.xpath(\"//div[@id='selectedcontent']/div/ul/li/a\")\n return [self._e_to_id(e) for e in ids_elements]", "def _MocaCtlGetNodeIDs(self):\n mc = subprocess.Popen([MOCACTL, 'showtbl', '--nodestats'],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n nodes = set()\n for line in out.splitlines():\n node = NODE_RE.search(line)\n if node is not None:\n nodes.add(int(node.group(1)))\n node_list = list(nodes)\n length = len(node_list)\n if int(self.AssociatedDeviceCount) != length:\n type(self).AssociatedDeviceCount.Set(self, length)\n return node_list", "def findAllTest(tx):\n query = (\n \"MATCH (t:Test) \"\n \"RETURN t , ID(t);\"\n )\n results = tx.run(query).data()\n return results", "def getNodeTests():\n\n nodeTestsQuery = NodeTest.query.all()\n \n if nodeTestsQuery: \n nodeTestList = []\n for nodeTestQuery in nodeTestsQuery:\n nodeTestList.append(nodeTestQueryToObject(nodeTestQuery))\n return nodeTestList\n else:\n return None", "def _node_ids(self, nodes=None):\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n return nodes\n return [node.nid for node in nodes]", "def all_node_ids(self):\n return [i for i in range(0, self.n_inputs + self.n_hidden + self.n_outputs)]", "def get_node_list(self):\n logger.debug('Updating node list')\n self.subscribe_mqtt('/nodes/+/responses/ping')\n self.node_ids = []\n\n def on_response(payload, data):\n if data and data.get('node', None):\n node_id = data['node']\n logger.debug('Found node with ID \"%s\"' % node_id)\n\n if node_id not in self.node_ids:\n self.node_ids.append(node_id)\n\n return False\n\n self.publish_mqtt('/ping', on_response=on_response)\n time.sleep(self.timeout / 1000)\n\n return self.node_ids", "def get_plate_ids_from_node_ids(self, nodes: list[int]) -> list[int]:\n ids = []\n\n for k, v in vars(self).items():\n if 
v.nodes == nodes:\n ids.append(k)\n\n if len(ids) == 0:\n ids = None\n\n return ids", "def getIds(self) -> List[int]:\n return list(self.users.keys())", "def get_ids(self):\n return self._ids", "def getLocationsId(tx):\n query = (\n \"MATCH (l:Location)\"\n \"RETURN ID(l)\"\n )\n\n idsList = tx.run(query).data()\n return idsList", "def ids(self):\n return list(self._id_generator())", "def ids(self):\n return list(self._id_generator())", "def get_ids(self):\n return [item.id for item in self.items]", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def get_uuids_in_node(self, node, project_id):\n program, project = project_id.split(\"-\", 1)\n\n try:\n res = self.paginate_query(node, project_id)\n uuids = [x[\"id\"] for x in res[\"data\"][node]]\n except:\n raise Gen3Error(\n \"Failed to get UUIDs in node '\"\n + node\n + \"' of project '\"\n + project_id\n + \"'.\"\n )\n\n return uuids", "def get_nodes(self, ids):\n return [self.node_labels[i] for i in ids]", "def getIDs(self):\n return self.multiengine.getIDs()", "def getNodeIds(self, cellId, edgeIndex):\n LIB.mnt_grid_getNodeIds.argtypes = [POINTER(c_void_p),\n c_longlong, c_int,\n POINTER(c_size_t)]\n nodeIds = (c_size_t*2)()\n ier = LIB.mnt_grid_getNodeIds(self.obj, cellId, edgeIndex, nodeIds)\n if ier:\n error_handler(FILE, 'getNodeIds', ier)\n return (nodeIds[0], nodeIds[1])", "def dump_trusted_identities():\n\n node_name = \"ala\"\n\n numberSubnodes = ens.numberSubnodes(node_name)\n id_list = []\n\n # Iterate for each node\n for i in range(numberSubnodes):\n\n # Get the subnode (in name_hash format)\n subnode_hash = ens.subnode(node_name, i)\n\n # Get the data for the subnode\n DID, name, DIDDocument, active = resolver.AlaDIDPublicEntity(\n node_hash=subnode_hash)\n\n identity = {\n \"DID\": DID,\n \"name\": name,\n \"node_hash\": subnode_hash.hex()\n }\n id_list.append(identity)\n \n return id_list", "def get_ordered_ids(tree):\n ordered_ids = []\n ordered_ids.extend(id(node) for node in tree.gen_tips())\n ordered_ids.extend(id(node) for node in tree.gen_internal_nodes())\n return ordered_ids" ]
[ "0.7611002", "0.74820614", "0.7343042", "0.7294262", "0.7118903", "0.71014166", "0.6871869", "0.6723846", "0.6703445", "0.67013633", "0.66748416", "0.6674061", "0.6599396", "0.65771914", "0.65442926", "0.6504613", "0.63970214", "0.6373796", "0.6266002", "0.6250796", "0.6246074", "0.6246074", "0.6234878", "0.6232209", "0.62292254", "0.6216335", "0.6213914", "0.61744857", "0.6155961", "0.6129645" ]
0.7595619
1
Method used to print the database structure using the PlotDBStructure module
def printDatabase():
    with driver.session() as s:
        personNodes = s.read_transaction(findAllPerson)
        houseNodes = s.read_transaction(findAllHome)
        locationNodes = s.read_transaction(findAllLocation)
        vaccineNodes = s.read_transaction(findAllVaccine)
        testNodes = s.read_transaction(findAllTest)
        liveRelationships = s.read_transaction(findAllLiveRelationships)
        visitRelationships = s.read_transaction(findAllVisitRelationships)
        appContactRelationships = s.read_transaction(findAllAppContactRelationships)
        getRelationships = s.read_transaction(findAllGetVaccineRelationships)
        makeRelationships = s.read_transaction(findAllMakeTestRelationships)
        infectRelationships = s.read_transaction(findAllInfectedRelationships)
        # Initialize the network attribute
        ps.PlotDBStructure.__init__()
        # Add nodes
        ps.PlotDBStructure.addStructure(personNodes)
        ps.PlotDBStructure.addStructure(houseNodes)
        ps.PlotDBStructure.addStructure(locationNodes)
        ps.PlotDBStructure.addStructure(vaccineNodes)
        ps.PlotDBStructure.addStructure(testNodes)
        # Add relationships
        ps.PlotDBStructure.addStructure(liveRelationships)
        ps.PlotDBStructure.addStructure(visitRelationships)
        ps.PlotDBStructure.addStructure(appContactRelationships)
        ps.PlotDBStructure.addStructure(makeRelationships)
        ps.PlotDBStructure.addStructure(getRelationships)
        ps.PlotDBStructure.addStructure(infectRelationships)
        # Show the graph structure
        ps.PlotDBStructure.showGraph()
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_database_structure(self):\n self.analyze()\n items = []\n for model in get_models():\n names = []\n # for f, m in model._meta.get_fields_with_model():\n for f in model._meta.concrete_fields:\n names.append(f.name)\n items.append(\n \"{0} : {1}\".format(fmn(model), ', '.join(names)))\n\n items = sorted(items)\n return rstgen.ul(items)", "def print_database(self):\n table_names = self.catalog\n for table_name in table_names:\n table = self.parse_table(table_name)\n if not table:\n continue\n print(f'TABLE NAME: {table_name}\\r\\n')\n print(tabulate(table, headers=\"keys\"))\n print('\\r\\n\\r\\n\\r\\n\\r\\n')", "def dump(self):\n # This is pretty, but we could just return the ddl_string\n outputs = [\"Table : %s\\n\" % self.name]\n # We show the columns in sequence order, using DSU\n # DSU = Decorate, Sort, Undecorate - a.k.a Schwartzian transform\n deco_cols = [ (x['sequence'], x) for x in list(self.columns.values()) ]\n deco_cols.sort()\n cols = [ col for seq, col in deco_cols ]\n for column in cols:\n outputs.append(\" %-30s\" % column['name'])\n if 'length' in column and column['length'] != None:\n if 'precision' in column and column['precision'] != None:\n # This column is a numeric data type\n column_defn = column['type']+self.__class__.calc_precision(column['type'], column['length'], column['precision'], column['scale'])\n else:\n # This column is a text data type\n column_defn = '%s(%d)' % (column['type'], column['length'])\n else:\n # This column is a simple data type such as date or boolean\n column_defn = column['type']\n outputs.append(\" %-15s \" % column_defn)\n if not column['nullable']:\n outputs.append(\" NOT NULL\")\n if 'special' in column:\n # Special case for e.g. 'enum' in MySQL\n outputs.append(' %s' % column['special'])\n outputs.append(\"\\n\")\n # Constraints please\n if len(self.constraints) != 0:\n outputs.append(\" Constraints;\\n\")\n for constraint_name, constraint in list(self.constraints.items()):\n outputs.append(\" %s, \" % constraint_name)\n outputs.append(\"%s \" % (constraint['type']))\n if 'columns' in constraint:\n outputs.append(\": \")\n outputs.append(', '.join(constraint['columns']))\n outputs.append(\"\\n\")\n # Indexes\n if len(self.indexes) > 0:\n outputs.append(\" Indexes:\\n\")\n for index_name, index in list(self.indexes.items()):\n outputs.append(\" %s, \" % index_name)\n outputs.append(\"%s\\n\" % index['type'])\n # Don't check number of columns because there must be at least 1\n outputs.append(\" Columns: \")\n outputs.append(\", \".join(index['columns']))\n outputs.append(\"\\n\")\n # LOG.debug(\"Table Dump output: \" + \"\".join(outputs))\n return \"\".join(outputs)", "def printDBContent(self,table):\n # get column names from the tables\n columnNames=[]\n if self.dbType==\"sqlite\":\n\t query = \"SELECT sql FROM sqlite_master WHERE name='%s'\"%table\n\t tup = self.fetchOne(query)\n\t schema= tup[0]\n\t # SQLite prints schema in the following format:\n\t # CREATE TABLE Name ( field1, type1, field2, type2 )\n\t # we split string in such a way to extract fields\n\t list = string.split(schema,\"(\",1)[1:]\n\t cList = string.split(list[0],\",\")\n\t for idx in xrange(0,len(cList)):\n\t cName = string.split(cList[idx])[0]\n\t if cName!=\"PRIMARY\":\n\t columnNames.append(cName)\n\telse:\n\t query = \"DESCRIBE %s\"%table\n\t tup = self.fetchAll(query)\n\t for item in tup:\n\t # first element in item is column name\n\t cName = item[0]\n\t columnNames.append(cName) \n\tcontentList = columnNames\n\tquery = \"SELECT * FROM 
\"+table\n\ttup = self.fetchAll(query)\n\tprint\n\tprint \"%s content:\"%table\n\temptyString=\"\t\"\n\t# get sizes of columns from contentList\n\tsizeList=[]\n\tfor x in contentList:\n\t sizeList.append(len(x))\n\t\n\t# store tuple content into finalList while counting largest\n\t# length of the object\n\tfinalList=[]\n\tfor x in tup:\n\t for idx in xrange(0,len(x)):\n\t\tif len(\"%s\"%x[idx])>len(contentList[idx]) and len(\"%s\"%x[idx])>sizeList[idx]:\n\t\t sizeList[idx]=len(\"%s\"%x[idx])\n\tline=\"\"\n\tline1=\"\"\n\tfor idx in xrange(0,len(sizeList)):\n\t line+=\"=\"\n\t line1+=\"-\"\n\t for i in xrange(0,sizeList[idx]): \n\t\tline+=\"=\"\n\t\tline1+=\"-\"\n\tprint line\n\tfor idx in xrange(0,len(contentList)):\n\t fString=string.ljust(contentList[idx],sizeList[idx])\n\t print fString,\n\tprint\n\tprint line1\n\t\n\tfor x in tup:\n\t for idx in xrange(0,len(x)):\n\t\tfString=string.ljust(\"%s\"%x[idx],sizeList[idx])\n\t\tprint fString,\n\t print\n\tprint line", "def print_db_short(self, formation_name='all'):\n\n # Print out column labels\n print \"%s%s%s%s%s%s%s\" % (\"Formation:\", \" \" * (15-len(\"Formation:\")),\n \"Style:\", \" \" * (15-len(\"Style:\")),\n \"# Links:\", \" \" * (10-len(\"# Links:\")),\n \"Description:\")\n\n # Iterate and print out all of the formations\n for formation in self.db:\n if formation['name'] == formation_name or formation_name == 'all':\n # Print out formation info\n print \"%s%s%s%s%s%s%s\" % (formation['name'], \" \" * (15-len(formation['name'])),\n formation['style'], \" \" * (15-len(formation['style'])),\n formation['num_links'], \" \" * (10-len(str(formation['num_links']))),\n formation['description'])", "def database_dump(self):\r\n print('=====Dumping database=====')\r\n self.database_table_dump(query.TABLE_STATS)\r\n print()\r\n self.database_table_dump(query.TABLE_TWEETS)\r\n print()\r\n self.database_table_dump(query.TABLE_POSTS)\r\n print()\r\n self.database_table_dump(query.TABLE_FOLLOWS)", "def dump_db_definition(self, args, dbdict):\n for k, v in dbdict.items():\n if re.search(b\"[^\\x0d\\x0a\\x09\\x20-\\x7e\\xc0-\\xff]\", v):\n print(\"%-20s - %s\" % (k, toout(args, v)))\n else:\n print('%-20s - \"%s\"' % (k, strescape(v)))", "def db_format(self):\r\n def gen_p2p_str(node):\r\n anc_ids = [a.id for a in node.ancestors()]\r\n anc_ids.reverse()\r\n anc_ids = anc_ids+[node.id]\r\n return ','.join([str(aid) for aid in anc_ids])\r\n\r\n db_dict = { 'location_id': [self.root.id],\r\n 'parent_id': [self.root.id],\r\n 'path_to_top_parent': [gen_p2p_str(self.root)],\r\n 'level': [0] }\r\n\r\n for lvl in range(1,self.max_depth()+1):\r\n nodes = self.level_n_descendants(lvl)\r\n for n in nodes:\r\n db_dict['location_id'].append(n.id)\r\n db_dict['parent_id'].append(n.parent.id)\r\n db_dict['path_to_top_parent'].append(gen_p2p_str(n))\r\n db_dict['level'].append(lvl)\r\n\r\n db_df = pd.DataFrame(db_dict)\r\n leaf_ids = [l.id for l in self.leaves()]\r\n db_df['most_detailed'] = 0\r\n db_df.ix[db_df.location_id.isin(leaf_ids), 'most_detailed'] = 1\r\n return db_df", "def dump(self):\r\n for (name, value) in self.__table__.items():\r\n print (name)\r\n print (value)", "def dump(self):\n outputs = [\"View : %s\\n\" % self.name]\n cols = list(self.columns.values())\n cols.sort()\n for column in cols:\n outputs.append(\" %-30s %-12s\" % (column['name'], column['type']))\n outputs.append(\"%7s\" % self.__class__.calc_precision(column['type'],\n column['length'], column['precision'], column['scale']))\n if not column['nullable']:\n outputs.append(\" NOT 
NULL\")\n outputs.append(\"\\n\")\n outputs.append(\"\\n\")\n outputs.append(self.sql+\"\\n\")\n outputs.append(\"\\n\")\n return \"\".join(outputs)", "def prettyprint(self):\n\n import pandas.io.sql as psql\n df = psql.read_sql(\"SELECT * FROM ATOM\",self.conn)\n print(df)", "def schema_diagram():\n\n from sqlalchemy_schemadisplay import create_schema_graph\n\n graph = create_schema_graph(\n metadata=db.MetaData(app.config['SQLALCHEMY_DATABASE_URI']),\n show_datatypes=True,\n show_indexes=True\n )\n\n graph.write_png('schema.png')", "def python_data_printer(cur):\n # Print a header.\n for fieldDesc in cur.description:\n print (fieldDesc[fdb.DESCRIPTION_NAME].ljust(fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE]),end=' ')\n print('')\n for fieldDesc in cur.description:\n print (\"-\" * max((len(fieldDesc[fdb.DESCRIPTION_NAME]),fieldDesc[fdb.DESCRIPTION_DISPLAY_SIZE])),end=' ')\n print('')\n # For each row, print the value of each field left-justified within\n # the maximum possible width of that field.\n fieldIndices = range(len(cur.description))\n for row in cur:\n for fieldIndex in fieldIndices:\n fieldValue = row[fieldIndex]\n if not isinstance(fieldValue,types.StringTypes):\n fieldValue = str(fieldValue)\n if isinstance(fieldValue,types.UnicodeType):\n fieldValue = fieldValue.encode('utf8')\n fieldMaxWidth = max((len(cur.description[fieldIndex][fdb.DESCRIPTION_NAME]),cur.description[fieldIndex][fdb.DESCRIPTION_DISPLAY_SIZE]))\n print (fieldValue.ljust(fieldMaxWidth),end=' ')\n print('')", "def test_print_database():\n db_conn = conn_to_db('optwrf.db')\n print_database(db_conn)\n close_conn_to_db(db_conn)", "def __repr__(self):\n dbline = ' - {0}'\n fcline = ' + {0}'\n output = ['{0} ({1})'.format(self.name, self.url)]\n output.append('-' * len(output[0]))\n output.append(' {0}'.format(self.mxd))\n for db in self._dbnames:\n output.append(dbline.format(db))\n for fc in sorted(self._datastructure[db]):\n output.append(fcline.format(fc))\n output.append('')\n output.append('')\n return '\\n'.join(output)", "def print_dd_dict( self, ):\n print( self._dd_dict )", "def print_db(self, formation_name='all'):\n\n # Iterate and print out all of the formations\n for formation in self.db:\n if formation['name'] == formation_name or formation_name == 'all':\n # Print out formation info\n print \"===============| %s |===============\" % formation['name']\n print \"Style: %s\" % formation['style']\n print \"Description: %s\" % formation['description']\n print \"Number of Links: %s\" % formation['num_links']\n\n # Print out goalkeeper\n print \"\\nGoalkeeper: 1\"\n position = formation['positions']['GK']\n links = 'Links: '\n for link in position['links']:\n links += link + ', '\n print \"%s%s%s%s%s%s%s\" % (position['name'], \" \" * (35-len(position['name'])),\n position['symbol'], \" \" * (5-len(position['symbol'])),\n 'GK', \" \" * (5-len('GK')),\n links)\n\n # Print out defense\n index = 1\n print \"\\nDefense: %d\" % formation['num_defenders']\n while index < 1 + formation['num_defenders']:\n for custom_symbol, position in formation['positions'].iteritems():\n if index == position['index']:\n links = 'Links: '\n for link in position['links']:\n links += link + ', '\n print \"%s%s%s%s%s%s%s\" % (position['name'], \" \" * (35-len(position['name'])),\n position['symbol'], \" \" * (5-len(position['symbol'])),\n custom_symbol, \" \" * (5-len(custom_symbol)),\n links)\n index += 1\n break\n\n # Print out midfield\n index = 1 + formation['num_defenders']\n print \"\\nMidfield: %d\" % 
formation['num_midfielders']\n while index < 1 + formation['num_defenders'] + formation['num_midfielders']:\n for custom_symbol, position in formation['positions'].iteritems():\n if index == position['index']:\n links = 'Links: '\n for link in position['links']:\n links += link + ', '\n print \"%s%s%s%s%s%s%s\" % (position['name'], \" \" * (35-len(position['name'])),\n position['symbol'], \" \" * (5-len(position['symbol'])),\n custom_symbol, \" \" * (5-len(custom_symbol)),\n links)\n index += 1\n break\n\n # Print out offense\n index = 1 + formation['num_defenders'] + formation['num_midfielders']\n print \"\\nOffense: %d\" % formation['num_attackers']\n while index < 1 + formation['num_defenders'] + formation['num_midfielders'] + \\\n formation['num_attackers']:\n for custom_symbol, position in formation['positions'].iteritems():\n if index == position['index']:\n links = 'Links: '\n for link in position['links']:\n links += link + ', '\n print \"%s%s%s%s%s%s%s\" % (position['name'], \" \" * (35-len(position['name'])),\n position['symbol'], \" \" * (5-len(position['symbol'])),\n custom_symbol, \" \" * (5-len(custom_symbol)),\n links)\n index += 1\n break\n print ''", "def grasspi_print_db(table_name):\n\n conn = sqlite3.connect(grasspi_config.cfg.db_file)\n conn.text_factory = str\n c = conn.cursor()\n val = \"SELECT * FROM \" + table_name\n for row in c.execute(val):\n #conn.text_factory = str\n print row\n c.close()", "def showSchema (self):\n\t\ts=[];add=s.append\n\t\tfor i in range(len(self.schema)):\n\t\t\tadd (\"%d. %s\" % (i+1, self.schema[i]))\n\t\treturn join (s, '\\n')", "def print_tables(self):\n\n conn = self.engine.connect()\n self.print_table(self.nodes, conn)\n self.print_table(self.paths, conn)\n self.view_tree(connection=conn)", "def uglyprint(self):\n\n ctmp = self.conn.cursor()\n ctmp.execute(\"SELECT * FROM ATOM\")\n print(ctmp.fetchall())", "def pretty_print_drt(self):\n self.drt_manager.pretty_print_drt()", "def __repr__(self):\n\n\t\t# Preparing variables\n\t\tl_s_content = [\t\t# List containing the content to print\n\t\t\t\"> The structure object :\"\n\t\t]\n\n\t\t# PDB fields\n\t\tl_s_content.append(\"s_name : {}\".format(self.s_name))\n\n\t\t# Structural fields\n\t\tl_s_content.append(\"i_atom_count : {}\".format(self.i_atom_count))\n\t\tl_s_content.append(\"a_atoms : {}\".format(len(self.a_atoms)))\n\n\t\t# Grid fields\n\t\tl_s_content.append(\"b_loaded : {}\".format(self.b_loaded))\n\t\tl_s_content.append(\"a_grid : {}\".format(self.a_grid.size))\n\n\t\treturn \"\\n\".join(l_s_content)\t\t# Returns the content to show", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def print_poyo():\n\tpoyo = \"SELECT * FROM poyo\"\n\tcur.execute(poyo)\n\tprint_table(hdrs_poyo)", "def show_db_overview(self):\n\n models_list = sorted_models_list()\n apps = [p.app_label for p in settings.SITE.installed_plugins]\n s = \"%d apps: %s.\" % (len(apps), \", \".join(apps))\n s += \"\\n%d models:\\n\" % len(models_list)\n i = 0\n headers = [\n #~ \"No.\",\n \"Name\",\n \"Default table\",\n #~ \"M\",\n \"#fields\",\n \"#rows\",\n #~ ,\"first\",\"last\"\n ]\n rows = []\n for model in models_list:\n if True: # model._meta.managed:\n i += 1\n cells = []\n #~ cells.append(str(i))\n cells.append(fmn(model))\n cells.append(model.get_default_table())\n #~ cells.append(str(model))\n #~ if model._meta.managed:\n #~ cells.append('X')\n #~ else:\n #~ cells.append('')\n cells.append(str(len(model._meta.concrete_fields)))\n qs = model.objects.all()\n 
n = qs.count()\n cells.append(str(n))\n #~ if n:\n #~ cells.append(obj2str(qs[0]))\n #~ cells.append(obj2str(qs[n-1]))\n #~ else:\n #~ cells.append('')\n #~ cells.append('')\n\n rows.append(cells)\n s += rstgen.table(headers, rows)\n return s", "def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion", "def help_dump(self):\n print(DUMP)", "def dbtrace_ui():\n\n pass", "def md_repr(self):\n dbline = '- {0}'\n fcline = ' + {0}'\n output = ['## {0} ({1})\\n'.format(self.name, self.url)]\n output.append('**{0}**\\n'.format(self.mxd.replace(\"\\\\\", \"\\\\\\\\\")))\n for db in self._dbnames:\n output.append(dbline.format(db.replace(\"\\\\\", \"\\\\\\\\\")))\n for fc in sorted(self._datastructure[db]):\n output.append(fcline.format(fc))\n output.append('')\n output.append('')\n return '\\n'.join(output)" ]
[ "0.72862166", "0.703305", "0.6886093", "0.6773256", "0.67546386", "0.67323565", "0.66061884", "0.65685666", "0.6563757", "0.6548127", "0.65158546", "0.65069234", "0.64561975", "0.6444599", "0.64432746", "0.6399927", "0.6342669", "0.63025194", "0.62467486", "0.6233792", "0.62174034", "0.6159485", "0.6076921", "0.60732055", "0.606916", "0.6060246", "0.6032298", "0.6010729", "0.5994955", "0.5987843" ]
0.79304254
0
Generate PW based on the current state, ie. current chunk, previously computed chunks and the current counter.
def generate_pw(self):
    chunks = []
    for chunk_no in range(self.CHUNKS):
        if chunk_no < self.chunk:
            chunks.append(self.verified_chunks[chunk_no])
        elif chunk_no == self.chunk:
            chunks.append(str(self.counter).zfill(self.PASSWORD_LENGTH / self.CHUNKS))
        else:
            chunks.append("000")
    return "".join(chunks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def password_generate_complex(self, ctx):\n await ctx.send(\n \"\".join(\n random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))\n )\n )", "def _generate(self, event):\n N = self.numDigits.GetValue()\n \n if not self.numDigits.IsInBounds():\n dlg = wx.MessageDialog\\\n (\n None, \n 'Password must be at least %s characters long' \n % str(self.numDigits.GetMin()),\n 'Password Generator Error', \n wx.OK|wx.ICON_ERROR\n )\n \n dlg.ShowModal()\n dlg.Destroy()\n return\n \n chars = ''\n \n if self.useLowerCase.IsChecked():\n chars = chars + string.ascii_lowercase\n \n if self.useUpperCase.IsChecked():\n chars = chars + string.ascii_uppercase\n \n if self.useDigits.IsChecked():\n chars = chars + string.digits\n \n if self.usePunctuation.IsChecked():\n chars = chars + self.punct\n \n if self.excludeSimilar.IsChecked():\n chars = chars.translate(None,'1IlS$')\n \n if not chars:\n dlg = wx.MessageDialog(None, \n 'No characters selected',\n 'Password Generator Error', \n wx.OK|wx.ICON_ERROR)\n dlg.ShowModal()\n dlg.Destroy()\n return\n \n \n randgen = random.SystemRandom()\n\n if self.startWithLetter.IsChecked():\n firstchars = chars.translate(None, string.digits+self.punct)\n fc = randgen.choice(firstchars)\n else:\n fc = ''\n\n self.password.Value = fc + \\\n ''.join(randgen.choice(chars) for i in range(N-len(fc)))\n \n # Calculate password strength and update bar\n nDays = calc_password_strength(self.password.Value, self.speed)\n self.strength.UpdateStrength(nDays)", "def generate(self):\n\n four_digits = random.choice(string.ascii_uppercase) + random.choice(string.ascii_lowercase) + \\\n random.choice(string.digits) + random.choice(string.punctuation)\n\n if self.pass_length == 4:\n\n # if password is 4 letter long\n self.shuffle_pass(four_digits)\n else:\n\n # if password length is higher than 4 it add some printable letter and add to the four_digit variable\n diff = self.pass_length - 4\n password_long = ''\n i = 1\n while i <= diff:\n i += 1\n p = random.choice(string.printable)\n password_long += p\n self.shuffle_pass(four_digits + password_long)", "def passwordGen() :\n\treturn __randomString(12)", "def generate_random_password(self):\r\n self.symbols = self.__set_symbol_dict() # set new symbol subset dict\r\n self.i = randrange(len(self.symbols)) # set new dict key pointer\r\n return \"\".join(self.__get_random_symbol() for _ in range(self.pw_len))", "def updateGeneratedPassword(num_letters, *args):\n\n indicator_text = f\"\"\"\n Select length with the slider: {num_letters}\n \"\"\"\n current_generated_password = generatePassword(int(num_letters))\n return (\n current_generated_password,\n dcc.Markdown(indicator_text),\n \"\",\n )", "def passwordGen(self):\n password = ''\n while len(password) < self.length:\n ls = []\n if self.numeric: ls.append(random.choice(list(string.digits)))\n if self.lower : ls.append(random.choice(list(string.ascii_lowercase)))\n if self.upper : ls.append(random.choice(list(string.ascii_uppercase)))\n if self.symbol : ls.append(random.choice(list(string.punctuation)))\n if not ls: sys.exit(0)\n random.shuffle(ls)\n if self.length - len(password) > len(ls):\n password += ''.join(ls) \n else:\n password += ''.join(ls[:self.length - len(password)])\n\n return password", "def genPassword(charOrWords):\n\tcurrentGenPasswordMethod.set(charOrWords)\n\tif charOrWords == \"words\":\n\t\t#Get the data from sliders 
etc\n\t\tnumberOfWords=genPasswordWordsLengthSlider.getValue()\n\t\tseperator=genPasswordWordsSeperatorVar.get()\n\t\tcommonWords=genPasswordWordCommonVar.get()\n\t\tpassword=generateWordPassword(numberOfWords,seperator,commonWords)\n\telse:\n\t\t#Get the length and amount of symbols etc\n\t\tnumberOfCharacters=genPasswordCharLengthSlider.getValue()\n\t\tnumberOfDigits=genPasswordDigitsSlider.getValue()\n\t\tnumberOfSymbols=genPasswordSymbolsSlider.getValue()\n\t\t#Generate the password\n\t\tpassword=generatePassword(numberOfCharacters,numberOfSymbols,numberOfDigits)\n\tgenPasswordVar.set(password)\n\n\t#Calculate password strength\n\tpasswordStrength=calculatePasswordStrength(password,split=genPasswordWordsSeperatorVar.get())\n\tpasswordScoreString=passwordStrength[5]\n\t#Show labels\n\tif passwordScoreString == \"Strong\":\n\t\tgenPasswordStrengthVar.set(\"Strong password\")\n\t\tgenPasswordLabel.config(fg=\"#66BC15\")\n\n\telif passwordScoreString == \"Medium\":\n\t\tgenPasswordStrengthVar.set(\"Medium password\")\n\t\tgenPasswordLabel.config(fg=mainOrangeColour)\n\n\telse:\n\t\tgenPasswordStrengthVar.set(\"Weak password\")\n\t\tgenPasswordLabel.config(fg=mainRedColour)\n\n\t#Add to the review screen\n\taddDataToWidget(genReviewEntry,password)\n\treviewPassword()", "def gen_hash(self, data):\n password_gen = crypt.encrypt(data)\n return password_gen", "def generate_password(self):\n password = str()\n\n length = len(self.chars_password)\n for index in range(self.length_password):\n char_index = random.randint(0, length - 1)\n password += self.chars_password[char_index]\n\n return password", "def pwgen(length=16, ichars=string.ascii_letters+string.digits):\n return ''.join(random.choice(ichars) for i in range(length))", "def make_random_passphrase():\n import random\n prng = random.SystemRandom()\n templates = ['aababbab', 'aabbabab', 'aabbabba', 'abaabbab', 'abababab',\n 'abababba', 'ababbaab', 'ababbaba', 'abbaabab', 'abbaabba',\n 'abbabaab', 'abbababa', 'abbabbaa', 'baababab', 'baababba',\n 'baabbaab', 'baabbaba', 'babaabab', 'babaabba', 'bababaab',\n 'babababa', 'bababbaa', 'babbaaba', 'babbabaa']\n alphabet = {'a':\"aeiou\", 'b':list(\"bcdfghjklmnprsvwxyz\") + [\"ch\",\"ph\",\"st\"]}\n for n in (1,2,3):\n template = prng.choice(templates)\n password = \"\".join([prng.choice(alphabet[c]) for c in template])\n print password.capitalize() + prng.choice(\"0123456789\"),\n return 0", "def iterate_pword(current_password):\n\n num = _pword_to_num(current_password) # Turn password into list of ints\n for idx in reversed(range(len(num))):\n char_ord = num[idx]\n if char_ord != 122:\n char_ord += 1\n num[idx] = char_ord\n break\n else:\n char_ord = 97\n num[idx] = char_ord\n return _num_to_pword(num)", "def generate_password(self, length):\n items = [\"a\", \"e\", \"i\", \"o\", \"u\", \"1\", \"2\", \"4\", \"5\", \"7\", \"8\", \"9\"]\n\n new_password = \"\"\n while(len(new_password) < length):\n item = items[randint(0, len(items) - 1)]\n new_password += item\n return new_password", "def solve_part_one(self):\n password = \"\"\n index = 0\n while len(password) < 8:\n (s, found_index) = self.find_next_hash(index)\n password += s[5]\n index = found_index + 1\n return password", "def generate_password(path: str, number: int) -> str:\n password = \"\"\n for i in range(number):\n rand_line = generate_random_numbers_string()\n password += Program.find_string_by_number(rand_line, path)\n\n return password", "def generate_password(n):\n import os\n import math\n from base64 import b64encode\n return 
b64encode(os.urandom(int(math.ceil(0.75*n))),'-_')[:n]", "def giveReadablePassword():\n import random\n words = [\n 'Alpha',\n 'Bravo',\n 'Charlie',\n 'Delta',\n 'Echo',\n 'Foxtrot',\n 'Golf',\n 'Hotel',\n 'India',\n 'Juliet',\n 'Kilo',\n 'Lima',\n 'Mike',\n 'November',\n 'Oscar',\n 'Papa',\n 'Quebec',\n 'Romeo',\n 'Sierra',\n 'Tango',\n 'Uniform',\n 'Victor',\n 'Whiskey',\n 'Xray',\n 'Yankee',\n 'Zulu']\n\n chars = [\n '!',\n '#',\n '$',\n '%',\n '&',\n '*',\n '-',\n '.',\n ':',\n '?',\n '@' \n ]\n\n\n random.seed()\n pw = ''\n pw += random.choice(words)\n pw += random.choice(words)\n pw += random.choice(chars)\n pw += \"{:04d}\".format(random.randint(0,10000))\n return pw", "def gen_pass(*, pw_length=10, use_nums=True, use_special=True,\n no_dupes=False, no_ambiguous=True):\n # Build up desired population of characters\n charset = LETTERS\n if use_nums:\n charset += NUMS\n if use_special:\n charset += SPECIALS\n if no_ambiguous:\n charset = ''.join([x for x in charset if x not in AMBIGUOUS])\n\n if no_dupes:\n x, tmp = pw_length, []\n while x > 0:\n val = ''.join(random.sample(charset, 1))\n if val not in tmp:\n tmp.append(val)\n x -= 1\n return ''.join(tmp)\n else:\n return ''.join(random.sample(charset, pw_length))", "def password_generator(password_lenght):\r\n password = \"\"\r\n\r\n try:\r\n if password_lenght >=1:\r\n for i in range(password_lenght):\r\n choice = random.choice(symbols)\r\n password += str(choice)\r\n print(f\"Your password is: {password} \\nTnank you!\")\r\n return password\r\n else:\r\n return 0\r\n except Exception:\r\n pass", "async def password_generate_strong(self, ctx, delimeter: str = \"\"):\n d = delimeter\n rc = random.choice\n rr = random.randint\n await ctx.send(\n d.join(rc(RANDOM_WORDS).capitalize() for i in range(3)) + f\"{d}{rr(1,1000)}\"\n )", "def gen_passphrase(self):\n return ''.join(\n random.sample(map(str, range(0,10)) +\n map(chr, range(ord('a'), ord('z') + 1)) +\n map(chr, range(ord('A'), ord('Z') + 1)), self.passphraselen))", "def create_password(self):\r\n alphabet = string.ascii_letters + string.digits\r\n password = ''.join(secrets.choice(alphabet) for i in range(30))\r\n\r\n QtWidgets.QMessageBox.information(self, \"Password generated\", \r\n \"{}\".format(password))", "def generate_password(\n password_length: int = 8,\n has_symbols: bool = False,\n has_uppercase: bool = False\n) -> str:", "def genPwd(alpha, length):\n # be sure that each character is exactly once present\n alpha = list(set(alpha))\n # return the created password\n return \"\".join([random.choice(alpha) for _ in range(length)])", "def genpass(length):\n password = \"\"\n choice = string.ascii_letters + string.digits\n for i in range(length):\n password += random.choice(choice)\n return password", "def generate_password(self, p_length=10, use_symbols=False):\n\n lowercase = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\n 'u', 'v', 'w', 'x', 'y', 'z']\n uppercase = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',\n 'U', 'V', 'W', 'X', 'Y', 'Z']\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n symbols = ['!', '@', '#', '$', '%', '&', '?']\n pwd = ''\n # Decide whether we want symbols in the password\n if use_symbols:\n n = 4\n else:\n n = 3\n for i in range(p_length):\n list_choice = random.randint(0, n) # choose a list to select from\n\n # From the chosen list pick a random character\n if list_choice == 0:\n character = 
lowercase[self.random_index(len(lowercase))]\n elif list_choice == 1:\n character = uppercase[self.random_index(len(uppercase))]\n elif list_choice == 2:\n character = numbers[self.random_index(len(numbers))]\n elif list_choice == 3:\n character = symbols[self.random_index(len(symbols))]\n\n pwd += character # add the random char from random list\n\n return pwd", "def password_generator(self):\n password_list = []\n for generated in JugglerPassGen.generate(self.word): # call the function for permutations\n password_list.append(generated)\n return password_list", "def pwgen(length=32, exclude=None):\n # pwgen -sBy -r \"\\`'\\\"\" 32 1\n if exclude is None:\n exclude = r\"\\`'\\\"\"\n else:\n exclude = r\"\\`'\\\"\" + exclude\n cmd = [\"pwgen\", \"-sBy\", \"-r\", exclude, \"%s\" % length, \"1\"]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # nosec\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n raise Exception(\"pwgen: %s %s\" % (stdout, stderr))\n return stdout.strip()", "def _generateblocks(self, n):\n if self.key is None:\n raise AssertionError('generator must be seeded before use')\n result = b''\n for i in range(n):\n result += self._cipher.encrypt(self.counter())\n return result" ]
[ "0.63229275", "0.6210466", "0.61542976", "0.6128442", "0.59861827", "0.5977954", "0.58935195", "0.58218557", "0.5725991", "0.56999713", "0.5637931", "0.5622442", "0.5614632", "0.55738485", "0.55629945", "0.5555935", "0.55490786", "0.5536461", "0.5524202", "0.55031157", "0.5492415", "0.54821396", "0.5466142", "0.54274625", "0.54200643", "0.54100764", "0.5404059", "0.5397713", "0.5384612", "0.5372836" ]
0.785432
0
Calculate the delta from the result. Returns a tuple of (delta, confident) Where ``delta`` is either a positive value that has been repeated at the last ``self.confirmations`` times or a negative value indicating an irregular delta. Confident is True if the value also satisfies the extra checks.
def confirm(self, result):
    delta = result.source_port - self.last_source_port
    self.last_source_port = result.source_port
    log.debug("source_port={0}, last_source_port={1}, "
              "real_delta={2}".format(
                  result.source_port, self.last_source_port, delta))
    # Either first connect or counter reset
    if delta < 1:
        return (delta, False)
    self.ringbuffer.append(delta)
    if len(self.ringbuffer) == (self.confirmations + self.extra):
        value = self.ringbuffer[-1]
        sames = len(filter(lambda x: x == value, self.ringbuffer))
        if sames >= self.confirmations:
            return value, (sames == self.confirmations + self.extra)
    return (-1, False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_delta(self):\n rho_des_index, distance, data_size = self.rho_des_index, self.distance, self.data_size\n self.result[rho_des_index[0]][1] = -1\n for i in range(1, data_size):\n for j in range(0, i):\n old_i, old_j = rho_des_index[i], rho_des_index[j]\n min_pos, max_pos = min(old_j, old_i), max(old_j, old_i)\n if distance[(min_pos, max_pos)] < self.result[old_i][1]:\n self.result[old_i][1] = distance[(min_pos, max_pos)]\n self.master[old_i] = old_j\n self.result[rho_des_index[0]][1] = max(self.result[:, 1])", "def _get_delta(self, now, then):\n if now.__class__ is not then.__class__:\n now = datetime.date(now.year, now.month, now.day)\n then = datetime.date(then.year, then.month, then.day)\n if now < then:\n raise ValueError(\"Cannot determine moderation rules because date field is set to a value in the future\")\n return now - then", "def future_deceiveddown(self, a):\n nfav, succfav = self.control[a.name] \n #f_n = n+1;\n # f_worldround = self.world.round+1\n f_successRate = float(a.success) / float(self.world.round+1)\n f_successRatefav = float(succfav) / float(nfav+1)\n if hardrule:\n return self.nsucc(a) > (f_successRatefav + epsilonD) and \\\n (nfav+1 > 5) and ((self.world.round - nfav) > 5) \n else:\n return nfav+1 > 5 and (f_successRate > f_successRatefav + epsilonD \\\n or f_successRate < epsilonD)", "def get_possible_tw(self):\n ev = self.ev\n f = np.array([np.abs(a - b) for a in ev for b in ev if not np.isclose(a, b)])\n return f[~(np.triu(np.abs(f[:, None] - f) <= settings.EQ_COMPARE_TOL, 1)).any(0)]", "def future_deceivedup(self, a): \n nfav, succfav = self.control[a.name]\n #f_n = n+1;\n # f_worldround = self.world.round+1\n f_successRate = float(a.success +1) / float(self.world.round+1)\n if hardrule:\n return (nfav+1 > 5) and ((self.world.round - nfav) > 5) and \\\n float(a.success+1-succfav)/(self.world.round+1 - nfav) > \\\n (float(succfav)/nfav) + epsilonD\n else:\n return nfav > 5 and (f_successRate > (float(succfav)/nfav) + epsilonD \\\n or f_successRate < epsilonD)", "def reached_convergence(self, delta):\n num_evaluations = len(self.evaluations)\n if num_evaluations < 4:\n return False\n\n if self.best_eval_actions is not None and \\\n (len(self.best_eval_actions) - self.evaluations[num_evaluations - 1]) > delta + 2:\n return False\n\n diff1 = abs(self.evaluations[num_evaluations - 4] - self.evaluations[num_evaluations - 3])\n diff2 = abs(self.evaluations[num_evaluations - 4] - self.evaluations[num_evaluations - 2])\n diff3 = abs(self.evaluations[num_evaluations - 4] - self.evaluations[num_evaluations - 1])\n diff4 = abs(self.evaluations[num_evaluations - 3] - self.evaluations[num_evaluations - 2])\n diff5 = abs(self.evaluations[num_evaluations - 3] - self.evaluations[num_evaluations - 1])\n diff6 = abs(self.evaluations[num_evaluations - 2] - self.evaluations[num_evaluations - 1])\n\n actions = 0\n for agent in self.agents:\n actions += len(agent.actions)\n\n # num of (all possible) actions ~= num of states\n eval_steps = min(actions, 100)\n\n if (self.evaluations[num_evaluations - 1] < eval_steps) and\\\n (self.evaluations[num_evaluations - 2] < eval_steps) and\\\n (self.evaluations[num_evaluations - 3] < eval_steps) and \\\n (self.evaluations[num_evaluations - 4] < eval_steps):\n\n if diff1 < delta and diff2 < delta and diff3 < delta and diff4 < delta and diff5 < delta and diff6 < delta:\n return True\n\n return False", "def _check_converge(self, final_state, final_state_pred):\r\n x_diff = float(abs(final_state[0] - final_state_pred[0]))\r\n y_diff 
= float(abs(final_state[1] - final_state_pred[1]))\r\n theta_diff = float(abs(final_state[2] - final_state_pred[2]))\r\n kappa_diff = float(abs(final_state[3] - final_state_pred[3]))\r\n\r\n converge = (x_diff <= self.acceptable_dx) & \\\r\n (y_diff <= self.acceptable_dy) & \\\r\n (theta_diff <= self.acceptable_dtheta) & \\\r\n (kappa_diff <= self.acceptable_dkappa)\r\n\r\n return converge", "def check_result(self, res):\n global dtParameterDesc, dtResultDesc\n if res not in dtResultDesc or 'tolerances' not in dtResultDesc[res]:\n # no tolerance definition\n return (True, None, None)\n try:\n ok = False\n show = ''\n badpars = []\n reference = None # reference parameter value to compare result with\n if 'reference' in dtResultDesc[res]:\n refpar = dtResultDesc[res]['reference']\n reference = self.parameters[refpar]\n resvalue = self.results[res]\n\n for tolpar in dtResultDesc[res]['tolerances']:\n tolvalue = self.parameters[tolpar]\n toltype = tolpar.split(' ')[1]\n if toltype == 'abstol' and reference is not None:\n if abs(reference-resvalue) <= tolvalue:\n ok = True\n else:\n show = '\\u21D1'\n badpars.append(tolpar)\n if toltype == 'reltol' and reference is not None and reference != 0:\n if abs(resvalue/reference-1) <= tolvalue:\n ok = True\n else:\n show = '\\u21D1'\n badpars.append(tolpar)\n if toltype == 'uplim':\n if resvalue <= tolvalue:\n ok = True\n else:\n show = '\\u21D1'\n badpars.append(tolpar)\n if toltype == 'lowlim':\n if resvalue > tolvalue:\n ok = True\n else:\n show = '\\u21D3'\n badpars.append(tolpar)\n return (ok, show, badpars)\n except (KeyError, TypeError):\n # some error\n print_exc()\n return (False, None, None)", "def delta(self, abs_value=False):\n return self.current - self.last if not abs_value else np.abs(self.current - self.last)", "def _check_vote_result(vote_type: int, proposal_info: 'ProposalInfo') -> bool:\n total_delegated = 0\n for vote_type_in_str in (\"agree\", \"disagree\", \"noVote\"):\n total_delegated += proposal_info.vote[vote_type_in_str][\"amount\"]\n\n preps_to_vote = proposal_info.vote[\"agree\" if vote_type == NetworkProposalVote.AGREE else \"disagree\"]\n voters_of_preps_to_vote: list = preps_to_vote[\"list\"]\n delegated_of_preps_to_vote: int = preps_to_vote[\"amount\"]\n try:\n if vote_type == NetworkProposalVote.AGREE:\n return len(voters_of_preps_to_vote) / proposal_info.total_voter >= ApproveCondition.APPROVE_RATE \\\n and delegated_of_preps_to_vote / proposal_info.total_delegated_amount \\\n >= ApproveCondition.APPROVE_RATE\n else:\n return len(voters_of_preps_to_vote) / proposal_info.total_voter >= ApproveCondition.DISAPPROVE_RATE \\\n and delegated_of_preps_to_vote / proposal_info.total_delegated_amount \\\n >= ApproveCondition.DISAPPROVE_RATE\n except ZeroDivisionError:\n return False", "def get_delta_distance_reward(self, previous_pos, new_pos):\n #cosine = np.dot(self.target_pos-previous_pos, new_pos-previous_pos)/((np.dot(self.target_pos-previous_pos,self.target_pos-previous_pos) * np.dot(new_pos-previous_pos,new_pos-previous_pos))**(0.5)+0.0001)\n dist_new=np.dot(new_pos-self.target_pos,new_pos-self.target_pos)**(0.5)\n dist_old=np.dot(previous_pos-self.target_pos,previous_pos-self.target_pos)**(0.5)\n if dist_old<=2.5*self.target_margin:\n reward=1000*self.total_distance\n print(\"\\nSUCESS!!!!!\")\n self.success=1\n #if cosine>=0:\n #reward=self.runtime\n elif self.hit_the_bounds(new_pos):\n reward=-500*self.total_distance\n else:\n reward=-7.5*dist_old/self.total_distance\n \n if dist_new<=5*self.target_margin:\n 
print(\"$$$$$ Almost there $$$$$\")\n if np.sign(dist_old-dist_new)==-1:\n print(\"%%%%%% Moving away :( %%%%\")\n else:\n print(\"%%%% Moving closer :) %%%%\") \n elif dist_new<=10*self.target_margin:\n print(\"$$$$$ Getting there $$$$$\")\n if np.sign(dist_old-dist_new)==-1:\n print(\"%%%%%% Moving away :( %%%%\") \n else:\n print(\"%%%% Moving closer :) %%%%\")\n \n return reward", "def verify_result(self, result, expected_interval=1):\n if len(result):\n try:\n prev_result = convert_to_int(result[0])\n for i in range(1, len(result)):\n current_result = convert_to_int(result[i])\n\n if current_result < prev_result:\n logger.error(\"Test '{0}' FAILED at index {1}: current value {2} is less than the previous \"\n \"value {3}\".format(self._test_name, i, current_result, prev_result))\n return False\n elif current_result - prev_result != expected_interval:\n logger.error(\n \"Test '{0}' FAILED at index {1}: the interval between current value {2} and the previous \"\n \"value {3} is less than the expected interval of {4}.\"\n .format(self._test_name, i, current_result, prev_result, expected_interval))\n return False\n prev_result = current_result\n return True\n except Exception as err:\n logger.error(\"Test '{0}' fails result verifications. Exception: {1}\".format(self._test_name, err))\n return False\n\n logger.error(\"Test '{0}' FAILED: Empty result.\".format(self._test_name))\n return False", "def delta(tval, tp_confidences, fp_confidences, num_samples):\n tp_percentage = \\\n np.sum([1 for x in tp_confidences if x > tval]) / num_samples\n if fp_confidences:\n fp_percentage = np.sum([1 for x in fp_confidences if x > tval]) / \\\n len(fp_confidences)\n else:\n fp_percentage = 0\n optimal_tp = len(tp_confidences) / num_samples\n delta_value = (tp_percentage - optimal_tp) ** 2 + fp_percentage ** 2\n return delta_value, tp_percentage, fp_percentage", "def _get_simulation_reward_with_done(self, info: dict) -> Tuple[float, bool]:\n return 0.0, False", "def compare_results(self):\n return self.guess_number == self.secret_number", "def calculate(self):\n\n tp = self.confusion_matrix.tp\n tn = self.confusion_matrix.tn\n fp = self.confusion_matrix.fp\n fn = self.confusion_matrix.fn\n\n agreement = tp + tn\n chance0 = (tn + fn) * (tn + fp)\n chance1 = (fp + tp) * (fn + tp)\n sum_ = tn + fn + fp + tp\n chance = (chance0 + chance1) / sum_\n\n return (agreement - chance) / (sum_ - chance)", "def discrepancy_resolved(self):\n # If there's a discrepancy and distance change matches the existing data, we're good.\n if self.distance_change == self.existing_data:\n return True\n # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good\n elif self.recommend_updates:\n return True\n else:\n return False", "def remaining(self):\n if self.goal:\n return self.goal - self.total_donated()\n else:\n return 0", "def error(preferences, true_preferences, normalised = True):\n\n # Sum all of the inconsistent pairs of preference relations as 1s.\n differences = sum(1 for (x,y) in preferences if (y,x) in true_preferences)\n\n # Sum all of the missing pairs of preference relations as 1/2s.\n differences += 0.5 * (abs(len(true_preferences) - len(preferences)))\n\n # If normalising the result (default) then divide the sum of the differences\n # by the length of the maximum number of pairs of relations.\n if normalised:\n return differences / len(true_preferences)\n\n return differences", "def get_delay_diff(self):\n if type(self.apply_delay) is not int:\n err = 
'ReplicationDelay().get_delay_diff(): '\\\n 'the self.apply_dilay should be an integer'\n raise ValueError(err)\n\n if self.current_time_lag_min:\n self.delay_diff = self.apply_delay - self.current_time_lag_min\n return self.delay_diff\n else:\n # Delay setting not found in a recovery.conf,\n # or a database is not in recovery mode:\n return 0", "def deltaCalc(self, expected):\n \n n = len(self.structure)\n self.delta = [None] * n\n self.delta[n - 1] = []\n \n for i in xrange(len(expected)):\n curr = self.a[n - 1][i]\n self.delta[n - 1].append(self.derivativeFunc(curr) * (expected[i] - curr))\n self.delta[n - 1] = np.array(self.delta[n - 1])\n \n # From n - 1 to 1 layer \n for i in xrange(n - 1, 0, -1):\n currDelta = self.delta[i]\n if i != (n - 1):\n currDelta = currDelta[0][:-1]\n \n self.delta[i - 1] = np.array(np.dot(currDelta, self.theta[i]))\n self.delta[i - 1][0] *= self.a[i - 1]\n \n return", "def test_potential_differences(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),\n self.delta_phi_p(t, x_p),\n decimal=5,\n )", "def score(self) -> Tuple[bool, str, float]:\n\n num_miss = np.sum(self.algorithm_data[:,FieldRolls.StepResult] != self.algorithm_data[:,FieldRolls.ResultPresentation])\n num_miss_perc = num_miss * 100/self.algorithm_data.shape[0]\n return True, \"\", num_miss_perc", "def calculate(self):\n\n return \"Yes\" if self.result else \"No\"", "def get_ratio_guarantee_advance(self):\n return (\n self.ratio_guarantee_advance *\n self.get_period_guarantee_advance *\n self.ratio2_guarantee_advance\n )", "def uncertainty(preferences, true_preferences, normalised = True):\n\n # Sum the total of missing pairs of preference relations.\n differences = abs(len(true_preferences) - len(preferences))\n\n # If normalising the result (default) then divide the sum of the differences\n # by the length of the maximum number of pairs of relations.\n if normalised:\n return differences / len(true_preferences)\n\n return differences", "def update_correctness(self) -> Prediction:\n self.was_correct = self._calculate_whether_correct()\n return self.update()", "def __basic_adaptive_comp_theorem(self):\n global_epsilon, global_delta = self._epsilon_delta\n epsilon_sum, delta_sum = \\\n map(sum, zip(*self._private_data_epsilon_delta_access_history))\n return epsilon_sum > global_epsilon or delta_sum > global_delta", "def _compute_confirm_cancel(self):\n domain = [\n ('state', '=', 'done'),\n '|',\n ('production_id', 'in', self.ids),\n ('raw_material_production_id', 'in', self.ids)\n ]\n res = self.env['stock.move'].read_group(domain, ['state', 'production_id', 'raw_material_production_id'], ['production_id', 'raw_material_production_id'], lazy=False)\n productions_with_done_move = {}\n for rec in res:\n production_record = rec['production_id'] or rec['raw_material_production_id']\n if production_record:\n productions_with_done_move[production_record[0]] = True\n for production in self:\n production.confirm_cancel = productions_with_done_move.get(production.id, False)", "def delta_func(self, st):\n res0 = st._state['visible']['reserve'][0]\n res1 = st._state['visible']['reserve'][1]\n number = st._state['visible']['number']\n if st._state['visible']['turn'] is 0:\n delta = res0-res1\n else:\n delta = res1-res0\n return number, delta" ]
[ "0.5485164", "0.5410988", "0.53473544", "0.52948594", "0.52594554", "0.5210485", "0.5185091", "0.51780695", "0.51453096", "0.5131199", "0.5126392", "0.51253116", "0.5115992", "0.5030006", "0.5028473", "0.5013451", "0.49988964", "0.49855825", "0.49762923", "0.49669504", "0.4965816", "0.4941677", "0.49274474", "0.4925023", "0.49211735", "0.49168235", "0.4908367", "0.49061114", "0.4902852", "0.48974025" ]
0.62908655
0
Sends a read request using the specified function byte. Returns a response payload containing the result of the read request; the format of its contents depend on the function byte.
def request_read(self, function_byte: int) -> bytes:
    _validate_function_byte(function_byte)
    message = [
        _BRAVIA_READ_REQUEST_HEADER_BYTE,
        _BRAVIA_REQUEST_CATEGORY_BYTE,
        function_byte,
        0xFF,
        0xFF,
    ]
    message.append(_calculate_checksum(message))
    self._logger.debug(
        "Sending Bravia read request on %s: %s",
        self.serial_port.name,
        dump_bytes_to_str(message),
    )
    self.serial_port.write(message)
    return self._get_read_request_response()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_read_request(file_offset=1, byte_count=MAX_READ):\n return StenoPacket(\n packet_id=StenoPacket.ID_READ,\n p1=file_offset,\n p2=byte_count,\n )", "def execute_read(function):\n raise NotImplementedError(\"execute_read() has not been implemented\")", "def _read(self, register):\n\n addr, num_bytes = register\n data = response = error = None\n if num_bytes == 1:\n data, response, error = self.packet_handler.read1ByteTxRx(\n self.port_handler, self._id, addr\n )\n elif num_bytes == 2:\n data, response, error = self.packet_handler.read2ByteTxRx(\n self.port_handler, self._id, addr\n )\n else:\n data, response, error = self.packet_handler.read4ByteTxRx(\n self.port_handler, self._id, addr\n )\n\n # Check response\n self._error_handler(response, error)\n\n return data", "async def i2c_read_request(self, address, register, number_of_bytes,\n read_type, cb=None):\n if address not in self.i2c_map:\n self.i2c_map[address] = {'value': None, 'callback': cb}\n if register is None:\n data = [address, read_type, number_of_bytes & 0x7f,\n (number_of_bytes >> 7) & 0x7f]\n else:\n data = [address, read_type, register & 0x7f, (register >> 7) & 0x7f,\n number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.I2C_REQUEST, data)", "async def i2c_read_request(self, address, register, number_of_bytes,\n read_type, cb=None, cb_type=None):\n if address not in self.i2c_map:\n # self.i2c_map[address] = [None, cb]\n self.i2c_map[address] = {'value': None, 'callback': cb,\n 'callback_type': cb_type}\n data = [address, read_type, register & 0x7f, (register >> 7) & 0x7f,\n number_of_bytes & 0x7f, (number_of_bytes >> 7) & 0x7f]\n await self._send_sysex(PrivateConstants.I2C_REQUEST, data)", "def readFunc(self, functionName, *args):\n self.codec.stream = StringIO(args[0])\n return getattr(self.codec, functionName)()", "def read_versa5(self,addr,fullrepsonse=False):\n time.sleep(0.002)\n addr = addr & 0xff\n cmd = bytes([0x07,0xea,addr,0x00])\n res = self.command(0x3c,cmd)\n if fullresponse:\n return res\n else:\n return res.response_data & 0x0ff", "def _read(self, register):\n\n assert register in _registers, 'Not a valid register. 
Register must be passed as string.'\n\n # send read command to register\n self.spi.writebytes([READ | _registers[register]])\n\n # return values in register\n return self.spi.readbytes(_register_len[register])", "def request_write(self, function_byte: int, payload: Sequence[int]) -> None:\n _validate_function_byte(function_byte)\n\n # Length of the payload plus the checksum\n message_length_byte = len(payload) + 1\n if message_length_byte > 255:\n raise ValueError(\n f\"Payload is too large (expected length <= 254 bytes, got {len(payload)} bytes)\"\n )\n\n message = [\n _BRAVIA_WRITE_REQUEST_HEADER_BYTE,\n _BRAVIA_REQUEST_CATEGORY_BYTE,\n function_byte,\n message_length_byte,\n *payload,\n ]\n message.append(_calculate_checksum(message))\n\n self._logger.debug(\n \"Sending Bravia write request on %s: %s\",\n self.serial_port.name,\n dump_bytes_to_str(message),\n )\n self.serial_port.write(message)\n self._get_write_request_response()", "async def call_function(self, command, params=[]):\n data = bytearray(2 + len(params))\n data[0] = _HOSTTOPN532\n data[1] = command & 0xFF\n for i, val in enumerate(params):\n data[2+i] = val\n \n # Send the frame and read the response\n await self._write_frame(data)\n response = await self._read_frame()\n\n if len(response) < 2:\n raise RuntimeError('Received smaller than expected frame')\n \n if not(response[0] == _PN532TOHOST and response[1] == (command+1)):\n raise RuntimeError('Received unexpected command response!')\n \n # Return response data.\n return response[2:]", "async def _read(self, unit, address, count, func):\n await self._connect_delay()\n async with self._lock:\n kwargs = {\"unit\": unit} if unit else {}\n result = await func(address, count, **kwargs)\n if isinstance(result, (ModbusException, ExceptionResponse)):\n _LOGGER.error(\"Hub %s Exception (%s)\", self._config_name, result)\n return result", "def send_read_request(self, start_position):\n\tsend_data = struct.pack(\"!6I\", 0b1101, 0b0001, self.epoch_no, self.handle_no, start_position, self.NUM_BYTES_TO_READ)\n\tself.client_socket.sendto(send_data, self.address)\t\n\treturn", "def _performCommand(self, functioncode, payloadToSubordinate):\n DEFAULT_NUMBER_OF_BYTES_TO_READ = 1000\n\n _checkFunctioncode(functioncode, None)\n _checkString(payloadToSubordinate, description='payload')\n\n # Build message\n message = _embedPayload(self.address, self.mode, functioncode, payloadToSubordinate)\n\n # Calculate number of bytes to read\n number_of_bytes_to_read = DEFAULT_NUMBER_OF_BYTES_TO_READ\n if self.precalculate_read_size:\n try:\n number_of_bytes_to_read = _predictResponseSize(self.mode, functioncode, payloadToSubordinate)\n except:\n if self.debug:\n template = 'MinimalModbus debug mode. Could not precalculate response size for Modbus {} mode. ' + \\\n 'Will read {} bytes. 
Message: {!r}'\n _print_out(template.format(self.mode, number_of_bytes_to_read, message))\n\n # Communicate\n response = self._communicate(message, number_of_bytes_to_read)\n\n # Extract payload\n payloadFromSubordinate = _extractPayload(response, self.address, self.mode, functioncode)\n return payloadFromSubordinate", "def read(self, readerFunction):\n self.beginRead()\n # Enter the reader's critical section\n result = readerFunction(self.data)\n # Exit the reader's critical section\n self.endRead()\n return result", "def read_bit(self, registeraddress, functioncode=2):\n _checkFunctioncode(functioncode, [1, 2])\n return self._genericCommand(functioncode, registeraddress)", "def callFunc(self, functionName, *args):\n getattr(self.codec, functionName)(args[0])\n return self.codec.stream.getvalue()", "def send_request(bytestr, mode, tag=''):\n init = Initializer.create_init()\n queue = init.queue\n\n addr = queue.get()\n client = ipc.HTTPTransceiver(addr, 12345)\n requestor = ipc.Requestor(PROTOCOL, client)\n\n data = dict()\n data['input'] = bytestr\n data['next'] = mode\n data['tag'] = tag\n\n start = time.time()\n requestor.request('forward', data)\n end = time.time()\n\n init.node_timer(mode, end - start)\n\n client.close()\n queue.put(addr)", "def status_request(dev, code, response_length, verbose=False):\n communicate(dev, a2b_hex('C' + code), a2b_hex('D' + code), verbose=verbose)\n response = dev.read(response_length)\n if verbose:\n print('<-', repr(response))\n return response", "async def _async_ws_get_function(self, function: int) -> Optional[str]:\n try:\n # The 'token' parameter has to be first, and 'fun' second\n # or the UPC firmware will return an error\n async with await self._session.post(\n f\"http://{self.host}/xml/getter.xml\",\n data=f\"token={self.token}&fun={function}\",\n headers=self.headers,\n allow_redirects=False,\n timeout=10,\n ) as response:\n\n # If there is an error\n if response.status != 200:\n _LOGGER.debug(\"Receive HTTP code %d\", response.status)\n self.token = None\n raise exceptions.ConnectBoxError()\n\n # Load data, store token for next request\n self.token = response.cookies[\"sessionToken\"].value\n return await response.text()\n\n except (asyncio.TimeoutError, aiohttp.ClientError) as err:\n _LOGGER.error(\"Error received on %s: %s\", function, err)\n self.token = None\n\n raise exceptions.ConnectBoxConnectionError()", "def get_response(command):\n connection = get_client()\n\n connection.send(command)\n\n data = connection.recv()\n connection.close()\n\n return data", "def read(self, nbytes: int, write: int = 0x00, /) -> bytes:", "def read(self, nbytes: int, write: int = 0x00, /) -> bytes:", "def FileRead(offset, bytes):\r\n return _hiew.HiewGate_FileRead(offset, bytes)", "def call_function(self, command, response_length=0, params=[], timeout_sec=1):\n # Build frame data with command and parameters.\n data = bytearray(2+len(params))\n data[0] = PN532_HOSTTOPN532\n data[1] = command & 0xFF\n data[2:] = params\n # Send frame and wait for response.\n if not self._write_frame(data):\n return None\n # Read response bytes.\n response = self._read_frame(response_length+2)\n # Check that response is for the called function.\n if not (response == \"no_card\"):\n if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)):\n raise RuntimeError('Received unexpected command response!')\n # Return response data.\n return response[2:]\n else:\n return response", "def mifare_read(self,address):\n return 
self.in_data_exchange(bytearray([MIFARE_COMMAND_READ,address]))", "def _rceCB(self, req):\r\n rosReq = rospy.AnyMsg()\r\n\r\n if _GZIP_LVL:\r\n rosReq._buff = zlib.decompress(req.getvalue())\r\n else:\r\n rosReq._buff = req.getvalue()\r\n\r\n rospy.wait_for_service(self._addr, timeout=5)\r\n serviceFunc = rospy.ServiceProxy(self._addr, self._srvCls)\r\n rosResp = serviceFunc(rosReq)\r\n\r\n if _GZIP_LVL:\r\n resp = StringIO(zlib.compress(rosResp._buff, _GZIP_LVL))\r\n else:\r\n resp = StringIO(rosResp._buff)\r\n\r\n return resp", "def kXR_read(self, streamid=None, status=None, dlen=None, data=None): \n return self.kXR_ok(streamid, status, dlen, data)", "def read_input_registers(self, unit, address, length):\r\n # generate jbus request \r\n request = Request(function_code=4, unit=unit, address=address, length=length)\r\n \r\n _logger.debug(\"Request is {}\".format(request.bytes))\r\n\r\n # generate jbus response\r\n response = self.transition.execute(request)\r\n return response", "def manage_read_request(self, client):\n\n # obtain the message\n message = client.recv()\n message = json.loads(message)\n msg = message[\"payload\"].strip()\n if msg.startswith(\"/\"):\n type = \"c2s\"\n elif msg.startswith(\"@\"):\n type = \"c2c\"\n else:\n type = \"c2g\"\n\n func = getattr(self, \"request_\"+type)\n func(client, message)\n # self.msg_map[message['type']](client, message)", "def cooperative_read(fd):\n def readfn(*args):\n result = fd.read(*args)\n sleep(0)\n return result\n return readfn" ]
[ "0.6297108", "0.62225974", "0.6193018", "0.60892624", "0.60267603", "0.5946476", "0.5598523", "0.5595243", "0.5543839", "0.5540499", "0.5503857", "0.5440764", "0.54234314", "0.5416054", "0.5386143", "0.5382996", "0.5360565", "0.5347526", "0.52887887", "0.5254913", "0.5248941", "0.5248941", "0.52421594", "0.52350855", "0.5226234", "0.5217183", "0.52058923", "0.52033496", "0.51899934", "0.5182931" ]
0.820644
0
Sends a write request using the specified function byte and corresponding payload. Does not return a response.
def request_write(self, function_byte: int, payload: Sequence[int]) -> None: _validate_function_byte(function_byte) # Length of the payload plus the checksum message_length_byte = len(payload) + 1 if message_length_byte > 255: raise ValueError( f"Payload is too large (expected length <= 254 bytes, got {len(payload)} bytes)" ) message = [ _BRAVIA_WRITE_REQUEST_HEADER_BYTE, _BRAVIA_REQUEST_CATEGORY_BYTE, function_byte, message_length_byte, *payload, ] message.append(_calculate_checksum(message)) self._logger.debug( "Sending Bravia write request on %s: %s", self.serial_port.name, dump_bytes_to_str(message), ) self.serial_port.write(message) self._get_write_request_response()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_write(function):\n raise NotImplementedError(\"execute_write() has not been implemented\")", "async def _write(self, unit, address, value, func):\n await self._connect_delay()\n async with self._lock:\n kwargs = {\"unit\": unit} if unit else {}\n await func(address, value, **kwargs)", "def send_request(self, function_name, body):\n pass", "def _write(self, register, value):\n\n addr, num_bytes = register\n response = error = None\n if num_bytes == 1:\n response, error = self.packet_handler.write1ByteTxRx(\n self.port_handler, self._id, addr, value\n )\n elif num_bytes == 2:\n response, error = self.packet_handler.write2ByteTxRx(\n self.port_handler, self._id, addr, value\n )\n else:\n response, error = self.packet_handler.write4ByteTxRx(\n self.port_handler, self._id, addr, value\n )\n\n # Check response\n self._error_handler(response, error)", "def _send(self, ws, func, params):\n ws.send(self._create_msg(func, params))", "def _send_payload(payload_ptr, payload_len, code):\n \n FILE_DEVICE_UNKNOWN = 0x22\n FILE_ANY_ACCESS = 0\n METHOD_NEITHER = 3\n\n # Recreate CTL_CODE macro to generate driver IOCTL \n ctl_code = (\n (FILE_DEVICE_UNKNOWN << 16) |\n (FILE_ANY_ACCESS << 14) | \n (code << 2) | \n METHOD_NEITHER\n )\n\n # Create handle to driver \"\"\"\n handle = kernel32.CreateFileA(\n \"\\\\\\\\.\\\\HackSysExtremeVulnerableDriver\", # lpFileName\n 0xC0000000, # dwDesiredAccess\n 0, # dwShareMode\n None, # lpSecurityAttributes\n 0x3, # dwCreationDisposition\n 0, # dwFlagsAndAttributes\n None # hTemplateFile\n )\n\n IO_CTL = kernel32.DeviceIoControl(\n handle, # hDevice\n ctl_code, # dwIoControlCode\n payload_ptr, # lpInBuffer\n c_int(payload_len), # nInBufferSize\n None, # lpOutBuffer\n 0, # nOutBufferSize\n byref(c_ulong()), # lpBytesReturned\n None # lpOverlapped\n )\n \n return kernel32.CloseHandle(handle)", "def send_packet(sender, payload):\n sender.write(payload)", "def send_packet(sender, payload):\n\n sender.write(payload)", "def __call__(self, payload: Union[bytes, str]) -> None:\n self.send(payload)", "def write(self, command):\r\n try:\r\n cmd = urllib.parse.quote(command) # escape special chars\r\n req_url = self.url + 'write/' + cmd\r\n requests.get(url=req_url)\r\n except ValueError:\r\n print(\"uart failed write\")", "def _write_transaction(self, tx_fun, **kwargs):\n # Wrapper for neo4j.Session.write_transaction\n with self._driver.session() as session:\n session.write_transaction(tx_fun, **kwargs)", "def write(self, endpoint, data):\n return self.device.write(endpoint, data)", "def Write(self, request, global_params=None):\n config = self.GetMethodConfig('Write')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Write(self, request, global_params=None):\n config = self.GetMethodConfig('Write')\n return self._RunMethod(\n config, request, global_params=global_params)", "def write( chunk, callback=None ):", "def winhttp_WinHttpWriteData(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hRequest\", \"lpBuffer\", \"dwNumberOfBytesToWrite\", \"lpdwNumberOfBytesWritten\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def write( data ):", "def send(self, payload: Union[bytes, str]) -> None: # pragma: no cover\n raise NotImplementedError(\"send is not implemented\")", "def write_request(self, request):\n msg = self.serialise_cbor_request(request)\n written = 0\n while written < len(msg):\n written += self.write(msg[written:])", "def add_write_callback(self, fcn, **kwargs):\n 
assert(hasattr(fcn, '__call__'))\n self._write_callbacks[fcn] = kwargs", "def sendMessage(self, payload, isBinary):", "def write():\n pass", "def write(self, chunk):\n return self.tornado_request.write(chunk)", "def _send(self, what, value, address='localhost:502', **kwargs):\n\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('w')\n TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: value is a list of bools or ints when write multiple times\n if 'count' in kwargs and kwargs['count'] > 1:\n count = kwargs['count']\n COUNT = '--count {} '.format(count)\n else:\n count = 1\n COUNT = '--count {} '.format(count)\n\n # NOTE: value is a int when writing to a register\n if what[0] == 'HR':\n if count == 1:\n VALUE = '-r {} '.format(value)\n else:\n VALUE = '-r '\n for v in value:\n VALUE += str(v)\n VALUE += ' '\n\n # NOTE: value is a bool when writing to a coil\n elif what[0] == 'CO':\n if count == 1:\n if value == True:\n VALUE = '-c {} '.format(1)\n else:\n VALUE = '-c {} '.format(0)\n else:\n VALUE = '-c '\n for v in value:\n if v == True:\n VALUE += str(1)\n else:\n VALUE += str(0)\n VALUE += ' '\n else:\n raise ValueError('IR and DI are read only data.')\n\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT +\n VALUE\n )\n # print 'DEBUG modbus_send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR modbus _send: ', error)", "def write(self, writerFunction):\n self.beginWrite()\n # Enter the writer's critical section\n result = writerFunction(self.data)\n # Exit the writer's critical section\n self.endWrite()\n return result", "def _writeBytes(self, b):\n self.socket.send(b)", "def writeBytes(self, command):\r\n try:\r\n command += '0a'\r\n req_url = self.url + 'bwrite/' + command\r\n requests.get(url=req_url)\r\n except ValueError:\r\n print(\"uart failed write\")", "def writer(data):\n if len(data) == 0:\n return\n try:\n # Call user write function:\n if write_function != None:\n write_function(data)\n except Exception as e:\n # Propagate error if the user function caused an error:\n presented_output_store[\"user_write_error\"] = str(e)\n try:\n process.kill()\n except Exception:\n pass\n presented_output_store[\"data\"] += data", "def write(self, bytes_):\n logger.debug(\"Sending: {} bytes\".format(len(bytes_)))\n wrote = self.impl.write(bytes_)\n logger.debug(\"Sent: {} bytes\".format(len(bytes_)))\n return wrote", "def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):\n _checkFunctioncode(functioncode, [6, 16])\n _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')\n _checkBool(signed, description='signed')\n _checkNumerical(value, description='input value')\n\n self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)" ]
[ "0.6561378", "0.62848", "0.62789536", "0.6099681", "0.60418934", "0.56204706", "0.56024796", "0.5577447", "0.5551332", "0.5468142", "0.5419342", "0.5402256", "0.53703344", "0.53703344", "0.53521544", "0.5314751", "0.530274", "0.5281684", "0.5278518", "0.52645814", "0.5243167", "0.5221809", "0.52162904", "0.51977026", "0.5191313", "0.51826346", "0.5173923", "0.5171357", "0.516133", "0.5150755" ]
0.76888937
0
This function uses self.get_state to find the locations of the robot and ball and returns a number in [0, NUM_STATES) representing that state
def get_state_num(self): robot_state = self.get_state('turtlebot3_waffle_pi','world') ball_state = self.get_state('soccer_ball','world') # each object is in a "box" that is RESOLUTION meters wide. robot_xbox = np.ceil((robot_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION) robot_ybox = np.ceil(robot_state.pose.position.y/Learn.RESOLUTION) ball_xbox = np.ceil((ball_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION) ball_ybox = np.ceil(ball_state.pose.position.y/Learn.RESOLUTION) # the state is the combination of dx and dy. dx = int(ball_xbox - robot_xbox) dy = int(ball_ybox - robot_ybox) # adjusting to remove negative values for states dx += Learn.BOXES_X-1 dy += Learn.BOXES_Y-1 # converting to unique number between 0 and NSTATES-1: return (2*Learn.BOXES_X-1)*dy+dx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state_arr(self):\n rpos = self.sim.getAgentPosition(self.robot_num)\n rvel = self.sim.getAgentVelocity(self.robot_num)\n rrad = self.sim.getAgentRadius(self.robot_num)\n v_pref = self.sim.getAgentMaxSpeed(self.robot_num)\n theta = math.atan2(rvel[1], rvel[0])\n # Robot's state entry. Note that goal is listed as the robot's current\n # position because we aren't using that goal as such, we are just\n # exploring.\n state = [\n rpos[0], rpos[1], rvel[0], rvel[1], rrad,\n self.headings[self.robot_num], rpos[0], rpos[1],\n v_pref, theta\n ]\n for agent in self.agents:\n if agent != self.robot_num: # We already accounted for the robot\n pos = self.sim.getAgentPosition(agent)\n vel = self.sim.getAgentVelocity(agent)\n rad = self.sim.getAgentRadius(agent)\n state.extend([pos[0], pos[1], vel[0], vel[1], rad,\n self.headings[agent]])\n for obs in self.obstacles:\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(rpos)\n p1, p2 = nearest_points(o, p)\n # Velocity is always 0 for obstacles\n # Heading is same as robot's\n state.extend([p1.x, p2.y, 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n else:\n # Point obstacle\n state.extend([obs[0][0], obs[0][1], 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n return state", "def get_state(self):\n curr_state = self.lidar_ranges[self.indices]\n # curr_state = (self.lidar_ranges[self.indices] - 5.) / 5.\n # print(curr_state)\n\n return curr_state", "def get_success_state_arr(self):\n rpos = self.sim.getAgentPosition(self.robot_num)\n rvel = self.sim.getAgentVelocity(self.robot_num)\n rrad = self.sim.getAgentRadius(self.robot_num)\n v_pref = self.sim.getAgentMaxSpeed(self.robot_num)\n theta = math.atan2(rvel[1], rvel[0])\n # Robot's state entry.\n state = [\n rpos[0], rpos[1], rvel[0], rvel[1], rrad,\n self.headings[self.robot_num], self.overall_robot_goal[0],\n self.overall_robot_goal[1], v_pref, theta\n ]\n for agent in self.agents:\n if agent != self.robot_num: # We already accounted for the robot\n pos = self.sim.getAgentPosition(agent)\n vel = self.sim.getAgentVelocity(agent)\n rad = self.sim.getAgentRadius(agent)\n state.extend([pos[0], pos[1], vel[0], vel[1], rad,\n self.headings[agent]])\n for obs in self.obstacles:\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(rpos)\n p1, p2 = nearest_points(o, p)\n # Velocity is always 0 for obstacles\n # Heading is same as robot's\n state.extend([p1.x, p2.y, 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n else:\n # Point obstacle\n state.extend([obs[0][0], obs[0][1], 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n return state", "def getState(self):\n return tuple([robot.getState() for robot in self.states.robots])", "def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping", "def get_ball_position_and_state(self):\n return (self.x, self.y, self.is_ball_visible)", "def _get_current_state(self):\n\n # One hot encoding of illegal actions\n illegal_actions_one_hot = np.ones(len(self.vehicle_data[0]))\n if len(self.possible_actions) != 0:\n illegal_actions_one_hot[self.possible_actions] = 0\n\n # Calculate mandatory vehicles left to load\n mandatory_vehicles_left = self.vehicle_data[1] - self.number_of_vehicles_loaded\n\n return np.hstack((self.end_of_lanes,\n self.lowest_destination,\n 
mandatory_vehicles_left[self.mandatory_cargo_mask],\n illegal_actions_one_hot,\n self.current_Lane)).astype(np.int16)", "def StateMachine(self):\n if self.mode is ALL:\n self.which_state()\n\n if self.current_state == FB:\n # print(\"FORWARD/BACKWARD\")\n self.FB()\n elif self.current_state == LAT:\n # print(\"LATERAL\")\n self.LAT()\n elif self.current_state == ROT:\n # print(\"ROTATION\")\n self.ROT()\n elif self.current_state == COMBI:\n # print(\"COMBINED\")\n self.COMBI()\n\n return self.return_bezier_params()", "def get_reverse_state_arr(self):\n rpos = self.sim.getAgentPosition(self.robot_num)\n rvel = self.sim.getAgentVelocity(self.robot_num)\n rrad = self.sim.getAgentRadius(self.robot_num)\n v_pref = self.sim.getAgentMaxSpeed(self.robot_num)\n theta = math.atan2(rvel[1], rvel[0])\n # Robot's state entry.\n state = [\n rpos[0], rpos[1], rvel[0], rvel[1], rrad,\n self.headings[self.robot_num], self.overall_robot_goal[0],\n self.overall_robot_goal[1], v_pref, theta\n ]\n for obs in self.obstacles:\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(rpos)\n p1, p2 = nearest_points(o, p)\n # Velocity is always 0 for obstacles\n # Heading is same as robot's\n state.extend([p1.x, p2.y, 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n else:\n # Point obstacle\n state.extend([obs[0][0], obs[0][1], 0, 0, self.obs_width,\n self.headings[self.robot_num]])\n return state", "def get_observation_driver_state(self):\n next_state = np.zeros(self.n_grids)\n grids = list(self.grids.values())\n for idx, grid in enumerate(grids):\n if grid is not None:\n next_state[idx] = grid.get_idle_driver_numbers_loop()\n return next_state", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def get_observation_from_state(state: State) -> List[int]:\n return state.robots_data + [state.time] + state.positions", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return self.get_pose()", "def state(self):\n lines = self.state_lines()\n for line in lines:\n if set(line) == {State.X_WON}:\n return State.X_WON\n if set(line) == {State.O_WON}:\n return State.O_WON\n if not any(map(lambda line: State.IN_PROGRESS in line, lines)):\n return State.DRAW\n return State.IN_PROGRESS", 
"def _get_state(self):\n\n # stack all variables and return state array\n state = np.hstack((self.sheep_com, self.farthest_sheep, \n self.target, self.dog_pose, self.radius_sheep, \n self.target_distance))\n return state", "def calculate_state(self):\n\t\tif self.state_type == 'Queues':\n\t\t\t#self.queue_state =\\\n\t\t\t#[0. if movement.AttValue('QLen(Current, Last)') is None else movement.AttValue('QLen(Current, Last)') for movement in self.lanes_movement]\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state)[np.newaxis,:]\n\n\t\tif self.state_type == \"QueuesSig\":\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state+[self.next_action_key])[np.newaxis,:]\n\t\n\t\treturn(state)", "def robotCode(self):\n objectDetected, objectDistances = self.senseObstacles()\n return objectDetected, objectDistances", "def eval(self, state):\n valueOfPlayers = 0\n valueOfRebelAdvancments = 0\n valueOfLocations = 0\n\n\n\n for coordinate in state.gameState:\n if state.gameState[coordinate]==state.blank:\n continue\n elif state.gameState[coordinate]==state.rebel:\n valueOfRebelAdvancments = -coordinate[0]\n elif state.gameState[coordinate]==state.jedi:\n continue\n elif state.gameState[coordinate]==state.sith:\n continue\n \n valueOfLocations += valueOfRebelAdvancments\n\n \n valueOfPlayers = state.numRebels + 4*state.numJedi - 4*state.numSith\n \n return valueOfPlayers*4 + valueOfLocations", "def _get_state(self):\n print(\"GET STATE\")\n res = self._send_command(\n \"RS;\",\n fb_required=True,\n res_pattern=\"STATE:\")\n # The received answer is supposed to be something like\n # STATE:0|1|-1\n state = int(res.split(':')[1])\n if state == PVDriver.IDLE:\n return \"IDLE\"\n elif state == PVDriver.MOVING:\n return \"MOVING\"\n else:\n return \"ERROR\"", "def computePosition(self, state):\n d = 0\n if state[5] == \"East\":\n d = 0\n elif state[5] == \"West\":\n d = 1\n elif state[5] == \"North\":\n d = 2\n else:\n d = 3\n return state[0]*64+state[1]*32+state[2]*16+state[3]*8+state[4]*4+d", "def getCurrentState (events_counters, states):\n gamma_raw = 0\n if events_counters[0] + events_counters[2] == 0:\n gamma_raw = -1000\n else:\n gamma_raw = float (events_counters[0]) / (float (events_counters[0]) +\n float (events_counters[2])) \n\n theta_raw = 0\n if events_counters[1] + events_counters[3] == 0:\n theta_raw = -1000\n else: \n theta_raw = float (events_counters[1]) / (float (events_counters[1]) +\n float (events_counters[3]))\n\n #print (\"gamma_raw = {}; theta_raw = {}\".format (gamma_raw, theta_raw))\n min_dist1 = 1\n target_ind1 = 0\n min_dist2 = 1\n target_ind2 = 0 \n for ind1 in range (len (states[0])):\n if math.fabs (states[0][ind1] - gamma_raw) <= min_dist1:\n min_dist1 = math.fabs (states[0][ind1] - gamma_raw)\n target_ind1 = ind1\n\n for ind2 in range (len (states[1])):\n if math.fabs (states[1][ind2] - theta_raw) <= min_dist2:\n min_dist2 = math.fabs (states[1][ind2] - theta_raw)\n target_ind2 = ind2\n #print (\"gamma = {}; theta = {}\".format (states[0][target_ind1], states[1][target_ind2]))\n return (target_ind1, target_ind2)", "def get_robot_occupancy(self): \n occupancy = np.zeros(self.no_robots)\n for i in range(self.no_robots):\n status_topic = '/robot_' + str(i) + 
'/move_base/status'\n msg = rospy.wait_for_message(status_topic, GoalStatusArray)\n msg_list = msg.status_list\n if msg_list == []:\n occupancy[i] = 0\n else:\n if len(msg_list) > 1:\n robot_status = msg_list[-1].status\n else:\n robot_status = msg_list[0].status\n\n if (robot_status == 1) or (robot_status == 0) or (robot_status == 7): # BUG pazi tuki je lahko se kaksna fora ker je teh statusov like 10\n occupancy[i] = 1 # robot on move\n else:\n occupancy[i] = 0 # robot on goal\n return occupancy", "def _get_state(self):\n # COMPUTE CLASSIFIER_STATE\n predictions = self.model.predict_proba(self.dataset.state_data)[:,0]\n predictions = np.array(predictions)\n idx = np.argsort(predictions)\n # the state representation is the *sorted* list of scores \n classifier_state = predictions[idx]\n \n # COMPUTE ACTION_STATE\n unknown_data = self.dataset.train_data[self.indeces_unknown,:]\n # prediction (score) of classifier on each unlabelled sample\n a1 = self.model.predict_proba(unknown_data)[:,0]\n # average distance to every unlabelled datapoint\n a2 = np.mean(self.dataset.distances[self.indeces_unknown,:][:,self.indeces_unknown],axis=0)\n # average distance to every labelled datapoint\n a3 = np.mean(self.dataset.distances[self.indeces_known,:][:,self.indeces_unknown],axis=0)\n next_action_state = np.concatenate(([a1], [a2], [a3]), axis=0)\n return classifier_state, next_action_state", "def get_state(self):\n\n return self.t, self.x", "def _getState(self, board):\r\n mySide = board.mySide(self.id)\r\n oppSide = board.oppSide(self.id)\r\n myMancala = board.stonesInMyMancala(self.id)\r\n oppMancala = board.stonesInOppMancala(self.id)\r\n \r\n state = [] # size should be inputSize - 1\r\n state.append(float(myMancala))\r\n# for i in range(self.rowSize):\r\n# state.append(mySide[i])\r\n for my in mySide:\r\n state.append(float(my))\r\n state.append(float(oppMancala))\r\n# for i in range(self.rowSize):\r\n# state.append(oppSide[i])\r\n for op in oppSide:\r\n state.append(float(op))\r\n return state", "def state(self):\r\n\r\n #Mark in wich direction is the prey\r\n prescence_prey_right = 1 if (self.prey.position[0] > self.body[0].position[0]) else 0\r\n prescence_prey_left = 1 if (self.prey.position[0] < self.body[0].position[0]) else 0\r\n prescence_prey_up = 1 if (self.prey.position[1] < self.body[0].position[1]) else 0\r\n prescence_prey_down = 1 if (self.prey.position[1] > self.body[0].position[1]) else 0\r\n #Direction where is moving\r\n actual_direction_right = 1 if (self.velocities[0] == 1) else 0\r\n actual_direction_left = 1 if (self.velocities[0] == -1) else 0\r\n actual_direction_up = 1 if (self.velocities[1] == -1) else 0\r\n actual_direction_down = 1 if (self.velocities[1] == 1) else 0\r\n #Mark if is an obstacle\r\n obstacles = np.ravel(self.obstacles())\r\n \r\n return (np.concatenate((\r\n [prescence_prey_right,\r\n prescence_prey_left,\r\n prescence_prey_up,\r\n prescence_prey_down,\r\n actual_direction_right,\r\n actual_direction_left,\r\n actual_direction_up,\r\n actual_direction_down],\r\n obstacles\r\n )))", "def actions(self, state):\n \"*** YOUR CODE HERE ***\"\n if state[2] == 0: # When agent is facing North\n state_fw = (state[0], state[1] + 1, 0)\n state_tr = (state[0], state[1], 3)\n state_tl = (state[0], state[1], 1)\n elif state[2] == 1: # When agent is facing West\n state_fw = (state[0] - 1, state[1], 1)\n state_tr = (state[0], state[1], 0)\n state_tl = (state[0], state[1], 2)\n elif state[2] == 2: # When agent is facing South\n state_fw = (state[0], state[1] - 1, 2)\n 
state_tr = (state[0], state[1], 1)\n state_tl = (state[0], state[1], 3)\n elif state[2] == 3: # When agent is facing East\n state_fw = (state[0] + 1, state[1], 3)\n state_tr = (state[0], state[1], 2)\n state_tl = (state[0], state[1], 0)\n else:\n raise Exception(\"This shouldn't be happening. Can't find heading\")\n \n shoot_loc_arr = [] # Initialize Array\n for allowed_state in self.allowed: # Iterate through all allowed states\n for goal_state in self.goals: # Iterate through all goal states\n if allowed_state[0] == goal_state[0] and allowed_state[1] < goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 0)) # X Matches, Head North\n if allowed_state[0] > goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 1)) # Y Matches, Head West\n if allowed_state[0] == goal_state[0] and allowed_state[1] > goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 2)) # X Matches, Head South\n if allowed_state[0] < goal_state[0] and allowed_state[1] == goal_state[1]: shoot_loc_arr.append((allowed_state[0], allowed_state[1], 3)) # Y Matches, Head East \n\n dist_fw_arr, dist_tr_arr, dist_tl_arr = ([9999999] for i in range(3)) # Initialize to large values\n for goal in shoot_loc_arr: # Iterate through arrays\n if (state_fw[0],state_fw[1]) in self.allowed:\n dist_fw_arr.append(manhattan_distance_with_heading(state_fw, goal))\n dist_tr_arr.append(manhattan_distance_with_heading(state_tr, goal))\n dist_tl_arr.append(manhattan_distance_with_heading(state_tl, goal))\n\n if (min(dist_fw_arr) <= min(min(dist_tr_arr),min(dist_tl_arr))) and (state_fw[0],state_fw[1]) in self.allowed: return ['Forward']\n if min(dist_tr_arr) <= min(min(dist_fw_arr),min(dist_tl_arr)): return ['TurnRight']\n if min(dist_tl_arr) <= min(min(dist_tr_arr),min(dist_tr_arr)): return ['TurnLeft']\n raise Exception(\"This shouldn't be happening. Can't determine action\")" ]
[ "0.7166662", "0.6894224", "0.6861624", "0.6600041", "0.6529893", "0.6403548", "0.63501287", "0.63484675", "0.63344824", "0.6219632", "0.61681604", "0.6102465", "0.60983646", "0.60495335", "0.60495335", "0.6021469", "0.599206", "0.59578645", "0.59425974", "0.5925313", "0.59148824", "0.5910201", "0.5909496", "0.58994967", "0.5897811", "0.58881485", "0.58828795", "0.5862437", "0.58495486", "0.5822147" ]
0.8323607
0
Given (x, y) coordinates for the gazebo world, moves the turtlebot to that location using self.set
def set_robot(self, x, y): state = ModelState() state.model_name = 'turtlebot3_waffle_pi' state.reference_frame = 'world' # pose state.pose.position.x = x state.pose.position.y = y state.pose.position.z = 0 quaternion = tf.transformations.quaternion_from_euler(0, 0, 0) state.pose.orientation.x = quaternion[0] state.pose.orientation.y = quaternion[1] state.pose.orientation.z = quaternion[2] state.pose.orientation.w = quaternion[3] # twist state.twist.linear.x = 0 state.twist.linear.y = 0 state.twist.linear.z = 0 state.twist.angular.x = 0 state.twist.angular.y = 0 state.twist.angular.z = 0 rospy.wait_for_service('/gazebo/set_model_state') try: set_state = self.set_state result = set_state(state) assert result.success is True except rospy.ServiceException: print("/gazebo/get_model_state service call failed")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def goto(x, y):\n turtleTmp.setposition(x, y)", "def set_new_location(self, xPos, yPos):", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))", "def set_location(self, x, y):\n self.scene.set_location(x, y)\n self.redraw()", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def drawTo(self, x, y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n self._turtle.setposition(x, y)", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n fstate = self._turtle.fill()\n if fstate: # only need to do this if in mid-fill\n self._turtle.fill(False)\n self._turtle.penup()\n self._turtle.setposition(x,y)\n self._turtle.pendown()\n if fstate: # only need to do this if in mid-fill\n self._turtle.fill(True)", "def move_to(self, x, y):\r\n self.__current_room = x, y", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n d = self._turtle.isdown()\n if d:\n self._turtle.penup()\n self._turtle.setposition(x,y)\n if d:\n self._turtle.pendown()", "def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()", "def __activate(self, x: int, y: int, tree: int) -> None:\n self.__maze[x, y] = tree", "def move_turtle(self):\n self.forward(self.move_speed)", "def move_to(self, destination_coords):\n self.x = destination_coords[0]\n self.y = destination_coords[1]\n return", "def m_location_set(self, x: int, y: int):\n pass", "def goto(self, x, y):\n # note that the snake can get outside of the canvas!\n if(self._gridmode):\n self._x = round(x)\n self._y = round(y)\n else:\n self._x = round(x, 2)\n self._y = round(y, 2)\n \n self._appendCurrentState()", "def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def _move_tetrino(self, tetrino, x, y):\n tetrino.location_offset[constant.X] += x\n tetrino.location_offset[constant.Y] += y\n tetrino.update_location()", "def set(self, x, y):\n self.x = x\n self.y = y", "def set_coordinates(self, x, y):\n self.x = x\n self.y = y", "def move(self, x, y):\r\n if self.brush_on:\r\n for lx, ly in line(self.pos_x, self.pos_y, x, y):\r\n self.set(lx, ly)\r\n\r\n self.pos_x = x\r\n self.pos_y = y", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def run(self):\n # type: () -> None\n self.move_to(self.location)", "def setRoboPos(self,x,y):\r\n self.RoboPosX=x\r\n self.RoboPosY=y", "def place(self,y,x):\n self.y = y\n self.x = x" ]
[ "0.72980785", "0.68223137", "0.6769516", "0.6769516", "0.6756576", "0.67504764", "0.67297053", "0.6659463", "0.6652474", "0.6623022", "0.6590633", "0.6578206", "0.64994663", "0.649511", "0.6484023", "0.6453467", "0.6437511", "0.63293874", "0.6307494", "0.63042516", "0.6255841", "0.62450206", "0.6207394", "0.61347085", "0.6133307", "0.61291087", "0.6109214", "0.610862", "0.60806036", "0.60734147" ]
0.68465525
1
Given an action in [self.MOVE_LEFT, self.STAY_PUT, self.MOVE_RIGHT], performs that action by moving the turtlebot accordingly.
def apply_action(self, action): robot_state = self.get_state('turtlebot3_waffle_pi','world') robot_x = robot_state.pose.position.x robot_y = robot_state.pose.position.y # Set the distance moved in an action such that it is at least as large as the # minimum distance that would let a robot in the middle of the goal go to either side #self.move_dist = max(((C.GOAL_TOP + C.GOAL_BOTTOM) / 2) / C.NUM_POS_SENDS, 0.5) if action == Learn.MOVE_LEFT: print("Move left") self.set_robot(robot_x, robot_y+self.move_dist) elif action == Learn.MOVE_RIGHT: print("Move right") self.set_robot(robot_x, robot_y-self.move_dist) else: print("Stay put")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(o, action):\n # if action not in Act: raise...?\n { Act.Down : lambda: o.applyGravity(),\n Act.Left : lambda: o._tryShift(o.block,Point(-1,0)),\n Act.Right : lambda: o._tryShift(o.block,Point( 1,0)),\n Act.Drop : lambda: o._setBlock(o.shadowBlock),\n Act.Hold : lambda: o._Hold(),\n Act.RotCW : lambda: o._Rotate(clockwise),\n Act.RotCCW: lambda: o._Rotate(counterClockwise),\n }[action]()", "def step(self, action):\n # print(action)\n distances = self.agent.return_distances(self.agent.corners, self.agent.line_pos)\n\n left = distances[0]\n right = distances[1]\n self.agent.distances.append({\n 'left': left,\n 'right': right\n })\n reward = 0\n if action == 1:\n self.agent.angle -= 90\n if self.agent.angle < 0:\n self.agent.angle = 0\n self.agent.direction_history.append('left')\n self.reset_raycasts(self.agent.angle)\n self.render()\n if left > right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 2:\n self.agent.angle += 90\n if self.agent.angle >= 360:\n self.agent.angle = 0\n\n self.reset_raycasts(self.agent.angle)\n self.render()\n self.agent.direction_history.append('right')\n if left < right:\n reward += 5\n else:\n reward -= 5\n\n elif action == 0:\n self.agent.direction_history.append('forward')\n if self.agent.angle >= 360: self.agent.angle == 0\n if self.agent.angle == 0 or self.agent.angle == 360:\n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n \n if left + right >= 50:\n reward += 5\n\n self.render()\n\n elif action == 3:\n self.agent.direction_history.append('reverse')\n if self.agent.angle == 0:\n self.agent.agent_position['y'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 90: \n self.agent.agent_position['x'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 180: \n self.agent.agent_position['y'] -= 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n elif self.agent.angle == 270:\n self.agent.agent_position['x'] += 10\n self.reset_raycasts(self.agent.angle)\n self.render()\n \n if left + right <= 50:\n reward += 5\n\n \n else:\n reward -= 5\n\n if \"forward\" not in self.agent.direction_history[len(self.agent.direction_history)-6:len(self.agent.direction_history)-1]:\n reward -= 10\n\n \n info = {}\n if self.agent.check_collision():\n reward -= 10\n self.reset() \n self.agent.rewards.append({\n 'leftDistance': left,\n 'rightDistance': right,\n 'reward': reward,\n })\n self.render()\n print(f\"REWARD: {reward}\")\n # self.render()\n # print(self.agent.direction_history[-1])\n self.agent.rewards.append(reward)\n return np.array([left, right]), reward, False, info", "def do_step(self, action_ind):\n action_ind = action_ind.item()\n if len(self.last_actions) < self.last_action_capacity:\n self.last_actions.append(action_ind)\n self.last_actions[self.last_action_ind] = action_ind\n self.last_action_ind = (\n self.last_action_ind + 1) % self.last_action_capacity\n robot_max_vel = self.sim.getAgentMaxSpeed(self.robot_num)\n # Decode the action selection:\n # 0 => do nothing\n # 1-16 => set velocity to `robot_max_vel/2` at angle\n # `(action_ind-1) * 2pi/16`\n # 17-32 => velocity to `robot_max_vel` at angle\n # 
`(action_ind-17) * 2pi/16`\n # 33-34 => change heading by\n # else => do nothing\n vel = (0, 0)\n angle = self.headings[self.robot_num]\n if 1 <= action_ind <= 16:\n angle += (action_ind - 1)*(math.pi / 8)\n vel = (\n (robot_max_vel/2) * math.cos(angle),\n (robot_max_vel/2) * math.sin(angle)\n )\n elif 17 <= action_ind <= 32:\n angle += (action_ind - 17)*(math.pi / 8)\n vel = (\n robot_max_vel * math.cos(angle),\n robot_max_vel * math.sin(angle)\n )\n elif action_ind == 33:\n self.headings[self.robot_num] += self.rot_speed\n elif action_ind == 34:\n self.headings[self.robot_num] -= self.rot_speed\n self.headings[self.robot_num] = normalize(self.headings[\n self.robot_num])\n # Set the robot's goal given the action that was selected\n ts = self.sim.getTimeStep()\n pos = self.sim.getAgentPosition(self.robot_num)\n self.goals[self.robot_num] = (\n pos[0] + vel[0] * ts, pos[1] + vel[1] * ts\n )\n self.advance_simulation()", "def move(self, agent, action):\n\t\tpass", "def move(self, action):\n \n self.counter += 1\n\n if action not in self.ACTIONS:\n raise Exception(\"Invalid action\")\n\n \n\n d_x, d_y = self.MOVEMENTS[action]\n x, y = self.position\n new_x, new_y = x + d_x, y + d_y\n new_X,new_Y=self.position_to_xy(new_x, new_y)\n \n\n if (new_x, new_y) not in self.cases:\n return self._get_state(), -3, False, self.ACTIONS\n \n \n \n elif (self.openGoal(new_x,new_y))&(new_X>-400):\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n \n return self._get_state(), 20, True, self.ACTIONS\n \n # elif not self.openGoal(new_x,new_y):\n # self.position = new_x, new_y\n # self.positionxy = self.position_to_xy(new_x, new_y)\n # return self._get_state(), -1, False, self.ACTIONS\n \n elif self.counter > 100:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, True, self.ACTIONS\n \n else:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, False, self.ACTIONS", "def execute_action(self, action):\n if self.game_over or len(self.agent_locs) == 0:\n pass\n elif action.startswith(\"MOVE \"):\n direction = ORIENTATION[action[5:]]\n flip = 2 if direction == 6 else 0\n if direction < 4:\n self.execute_actions(direction + 1)\n else:\n # Relative direction. 
Either forward (4) or backward (6)\n direction = self.orientation ^ flip\n self.execute_actions(direction + 1)\n self.orientation ^= flip\n self.game_over = self.has_exited().any()\n elif action.startswith(\"TURN \"):\n direction = ORIENTATION[action[5:]]\n self.orientation += 2 - direction\n self.orientation %= 4\n elif action.startswith(\"FACE \"):\n self.orientation = ORIENTATION[action[5:]]\n elif action.startswith(\"TOGGLE\"):\n if len(action) > 6:\n # Toggle in a particular direction\n direction = ORIENTATION[action[7:]]\n else:\n direction = self.orientation\n self.execute_actions(direction + 5)\n elif action in (\"RESTART\", \"ABORT LEVEL\", \"PREV LEVEL\", \"NEXT LEVEL\"):\n self.game_over = action\n return 0", "def action():\n if str(value).lower() == \"taxi\":\n self.taxi()\n elif str(value).lower() == \"fly\":\n self.pre_fly()\n self.fly()\n self.post_fly()\n elif str(value).lower() == \"return\":\n self.motor.move(self.return_position)", "def move(self, action): # Good\n if action == 0:\n dx, dy = 0, 1\n elif action == 1:\n dx, dy = 1, 0\n elif action == 2:\n dx, dy = 0, -1\n elif action == 3:\n dx, dy = -1, 0\n else:\n dx, dy = 0, 0\n\n # Check for max speed\n if ((self.vel_x + dx)**2 + (self.vel_y + dy)**2) \\\n <= self.max_speed_sq:\n self.x_vel += dx\n self.y_vel += dy\n\n self.prev_pos = self.center\n super(Player, self).move()", "def change_movement(self, action):\r\n if action == \"diagonal\" and self.movement != \"diagonal\":\r\n self.movement = \"diagonal\"\r\n self.x_speed = 3\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_diagonal)\r\n elif action == \"horizontal\" and self.movement != \"horizontal\":\r\n self.movement = \"horizontal\"\r\n self.x_speed = 3\r\n self.y_speed = 0\r\n self.canvas.after(50, self.move_horizontal)\r\n elif action == \"vertical\" and self.movement != \"vertical\":\r\n self.movement = \"vertical\"\r\n self.x_speed = 0\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_vertical)\r\n elif action == \"inward_outward\":\r\n self.movement = \"inward_outward\"\r\n self.canvas.after(50, self.move_inward_outward)", "def execute_action(self, agent, action):\n if self.is_done():\n return\n agent.bump = False\n axloc, ayloc = action\n self.executing_agent = agent\n sensor, state = self.move((axloc, ayloc))\n agent.check_status(sensor, state)", "def play_step(self, action):\n self.players[0].moving_left = False\n self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "def _set_action(self, action):\n\n rospy.logdebug(\"Start Set Action ==>\"+str(action))\n # We convert the 
actions to speed movements to send to the parent class of Parrot\n linear_speed_vector = Vector3()\n angular_speed = 0.0\n\n if action == 0: # FORWARDS\n linear_speed_vector.x = self.linear_forward_speed\n self.last_action = \"FORWARDS\"\n elif action == 1: # BACKWARDS\n linear_speed_vector.x = -1*self.linear_forward_speed\n self.last_action = \"BACKWARDS\"\n elif action == 2: # STRAFE_LEFT\n linear_speed_vector.y = self.linear_forward_speed\n self.last_action = \"STRAFE_LEFT\"\n elif action == 3: # STRAFE_RIGHT\n linear_speed_vector.y = -1*self.linear_forward_speed\n self.last_action = \"STRAFE_RIGHT\"\n elif action == 4: # UP\n linear_speed_vector.z = self.linear_forward_speed\n self.last_action = \"UP\"\n elif action == 5: # DOWN\n linear_speed_vector.z = -1*self.linear_forward_speed\n self.last_action = \"DOWN\"\n\n # We tell drone the linear and angular speed to set to execute\n self.move_base(linear_speed_vector,\n angular_speed,\n epsilon=0.05,\n update_rate=10)\n\n rospy.logdebug(\"END Set Action ==>\"+str(action))", "def step(self, action):\n # print(\"############################\")\n # print(\"action: {}\".format(action))\n\n self.movement_complete.data = False\n\n # 1) Read last joint positions by getting the observation before acting\n old_observation = self.get_obs()\n\n # 2) Get the new joint positions according to chosen action (actions here are the joint increments)\n if self._joint_increment is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(action, old_observation[1:7])\n\n # 3) Move to position and wait for moveit to complete the execution\n self.publisher_to_moveit_object.pub_joints_to_moveit(next_action_position)\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n # time.sleep(s\n\n \"\"\"\n #execute action as long as the current position is close to the target position and there is no invalid collision and time spend in the while loop is below 1.2 seconds to avoid beeing stuck touching the object and not beeing able to go to the desired position \n time1=time.time()\n while np.linalg.norm(np.asarray(self.joints_state.position)-np.asarray(next_action_position))>0.1 and self.get_collisions()==False and time.time()-time1<0.1: \n rospy.loginfo(\"Not yet reached target position and no collision\")\n \"\"\"\n # 4) Get new observation and update min_distance after performing the action\n new_observation = self.get_obs()\n if new_observation[0] < self.min_distace:\n self.min_distace = new_observation[0]\n # print(\"observ: {}\".format( np.around(new_observation[1:7], decimals=3)))\n\n # 5) Convert Observations into state\n state = U.get_state(new_observation)\n\n # 6) Check if its done, calculate done_reward\n done, done_reward, invalid_contact = self.is_done(new_observation)\n\n # 7) Calculate 
reward based on Observatin and done_reward and update the accumulated Episode Reward\n reward = UMath.compute_reward(new_observation, done_reward, invalid_contact)\n\n ### TEST ###\n if done:\n joint_pos = self.joints_state.position\n print(\"Joint in step (done): {}\".format(np.around(joint_pos, decimals=3)))\n ### END of TEST ###\n\n self.accumulated_episode_reward += reward\n\n self.episode_steps += 1\n\n return state, reward, done, {}", "def decide_move(self, action):\n x1, y1 = action['xy1']\n x2, y2 = action['xy2']\n self.__state.push(action)", "def step(self, action):\n\n action[1] = 0 if action[1] < 0 else 1\n\n if not self.moving:\n self.agent_host.sendCommand(\"move 0.5\")\n time.sleep(.2)\n self.moving = True\n\n # Get Action\n command = \"strafe \" + str(action[0])\n if ((action[0] < 0 and self.allow_left) or (action[0] > 0 and self.allow_right)):\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.agent_host.sendCommand(\"strafe 0\")\n time.sleep(.1)\n\n if action[1]:\n if self.checkCommand:\n self.jumpsOverDitches += 1\n self.checkCommand = False\n self.agent_host.sendCommand(\"jump 1\")\n time.sleep(.2)\n self.agent_host.sendCommand(\"jump 0\")\n\n # if (command == \"crouch 1\"):\n # self.agent_host.sendCommand(command)\n # time.sleep(.3)\n # self.agent_host.sendCommand(\"crouch 0\")\n # time.sleep(.2)\n\n self.episode_step += 1\n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs, self.allow_left, self.allow_right, curZPos, curXPos = self.get_observation(world_state)\n if curZPos:\n self.curZPos = curZPos\n if curXPos:\n if self.obs[3 + int(curXPos)]:\n self.checkCommand = True\n self.numDitchesEncountered += 1\n\n # Get Done\n done = not world_state.is_mission_running \n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n self.episode_return += reward\n\n return self.obs, reward, done, dict()", "def execute_action(self, agent, action):\n agent.bump = False\n agent.performance_measure -= 1\n \n if action == 'TurnRight':\n agent.heading = self.turn_heading(agent.heading, -1)\n elif action == 'TurnLeft':\n agent.heading = self.turn_heading(agent.heading, +1)\n elif action == 'Forward':\n self.move_to(agent, vector_add(self.heading_to_vector(agent.heading),\n agent.location))\n elif action == 'Grab':\n if self.some_things_at(agent.location, tclass=Gold):\n try:\n gold = self.list_things_at(agent.location, tclass=Gold)[0]\n agent.has_gold = True\n self.delete_thing(gold)\n except:\n print \"Error: Gold should be here, but couldn't find it!\"\n print 'All things:', self.list_things_at(agent.location)\n print 'Gold?:', self.list_things_at(agent.location, tclass=Gold)\n sys.exit(-1)\n\n elif action == 'Release':\n if agent.location == self.entrance:\n if agent.has_gold:\n agent.performance_measure += 1000\n self.done = True\n elif action == 'Shoot':\n if agent.has_arrow:\n agent.has_arrow = False\n agent.performance_measure -= 10\n self.shoot_arrow(agent)\n elif action == 'Stop':\n self.done = True\n \n print '\\nCurrent Location: ', agent.location\n print 'Heading: ', self.heading_to_str(agent.heading)\n print 'Reminder- Start Location:', self.entrance\n print ''\n print 'Percepts:'", "def move_turtle(self):\n self.forward(self.move_speed)", "def go(self):\n global Moving\n\n if TargetVal > ActualVal:\n Moving = 'bak'\n elif TargetVal < ActualVal:\n Moving = 'fwd'\n\n MoveMotor()", "def do_move_action(self, move_action: action.Move) -> 
None:\n self.logger.debug(\"Executing action.Move: %s\", move_action)\n self.reset_selection()\n # First move the cursor on the unit to move\n first = MoveCursorAnimation(self.cursor.coord, move_action.who.coord)\n # Then select the unit to move\n first.next = SelectAndWait(move_action.who.coord)\n # Then move the cursor on the destination\n first.next.next = MoveCursorAnimation(move_action.who.coord, move_action.where)\n # Finally play the unit move animation\n first.next.next.next = self.make_move_unit_animation(move_action.who, move_action.where)\n self.add_child(first)", "def execute_action(self, agent, action):\n if action == 'Right':\n agent.location = loc_B\n agent.performance -= 1\n elif action == 'Left':\n agent.location = loc_A\n agent.performance -= 1\n elif action == 'Suck':\n if self.status[agent.location] == 'Dirty':\n agent.performance += 10\n self.status[agent.location] = 'Clean'", "def execute_action(self, agent, action):\n if action == 'Right':\n agent.location = loc_B\n agent.performance -= 1\n elif action == 'Left':\n agent.location = loc_A\n agent.performance -= 1\n elif action == 'Suck':\n if self.status[agent.location] == 'Dirty':\n agent.performance += 10\n self.status[agent.location] = 'Clean'", "def primitive_action(game, action):\n if action == \"north\":\n game.move(NORTH)\n elif action == \"south\":\n game.move(SOUTH)\n elif action == \"east\":\n game.move(EAST)\n elif action == \"west\":\n game.move(WEST)\n elif action == \"chop\":\n game.chop()\n elif action == \"harvest\":\n game.harvest()\n elif action == \"deposit\":\n game.deposit()\n\n return game", "def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)", "def movement(self, action):\r\n\r\n #if its moving horizontally only can move vertically in the next move\r\n if self.velocities[1] == 0:\r\n if action == 0 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = -1\r\n if action == 1 :\r\n self.velocities[0] = 0\r\n self.velocities[1] = 1\r\n\r\n #if its moving vertically only can move horizontally in the next move\r\n if self.velocities[0] == 0:\r\n if action == 2 :\r\n self.velocities[0] = -1\r\n self.velocities[1] = 0\r\n if action == 3 :\r\n self.velocities[0] = 1\r\n self.velocities[1] = 0\r\n \r\n self.displacement()", "def move():\n Robot.move()", "def makeMove(game, action):\n key = 0\n if action == 0:\n # Press D\n key = pygame.K_d\n elif action == 1:\n # Press A\n key = pygame.K_a\n elif action == 2:\n # Press W\n key = pygame.K_w\n elif action == 3:\n # Press S\n key = pygame.K_s\n elif action == 4:\n # Press SPACE\n key = pygame.K_SPACE\n else:\n # Should not happen\n raise('Invalid Action')\n\n game.on_key_press(key)\n game.on_key_release(key)\n\n # Forward game more than one frame\n for _ in range(10):\n game.update()\n game.render()", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n 
self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def move(self, action):\n self.time += 1\n\n # If ship is destroyed ship can only contemplate sadness and despair\n if not action or not self.is_playable():\n return None\n\n self.actualise = False\n\n if self.leroy_time == 1:\n self.back_to_normal()\n if self.leroy_time > 0:\n self.leroy_time -= 1\n\n # there is a chance that the ia enter in leroy mode\n # the ia goes mad for some time, acting randomly\n # added to allow the ships to explore the possible actions and not stay passive\n if not self.player and self.leroy_time == 0 and self.agent.behavior == \"network\" and random() < LEROY_RATE:\n self.leroy_jenkins()\n\n # training reward depending on position\n # self.agent.reward = self.go_bottom_reward()\n\n if isinstance(action, ActionOneHot):\n if action.pointing:\n self.pointing = Point(randint(0, DEFAULT_WIDTH-1), randint(0, DEFAULT_HEIGHT-1))\n elif isinstance(action, Action):\n if action.pointing:\n self.pointing = action.pointing\n # print(\"action.pointing\", action.pointing)\n # print(\"turn \", self.direction)\n\n if action.thrust:\n self.thrust()\n if action.shoot:\n self.shoot()", "def step(self, actions): # actions is a list,\n\n assert len(actions) == len(self.agents), \"Number of actions (\" + str(\n len(actions)) + \") does not match number of agents (\" + str(self.n_agents) + \")\"\n\n # Process movement based on real states (not belief)\n\n\n rewards = [0.] * self.n_agents\n\n reward = 0.\n\n\n nextcells = [None] * self.n_agents\n rand_nums = self.rng.uniform(size=self.n_agents)\n\n for i in range(self.n_agents):\n\n currcell = self.tocellcoord[self.agents[i].state]\n if isinstance(actions,int):\n act = actions\n else:\n act = actions[i]\n direction = self.directions[act]\n\n if rand_nums[i] > 1/3: # pick action as intended\n if self.occupancy[tuple(currcell + direction)] == 0:\n nextcells[i] = self.tocellnum[tuple(currcell+direction)]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n else: # pick random action, except one initially intended\n adj_cells = self.adjacent_to(currcell) # returns list of tuples\n adj_cells.remove(tuple(currcell+direction))\n\n index = self.rng.choice(range(len(adj_cells)))\n new_cell = adj_cells[i]\n\n if self.occupancy[new_cell] == 0:\n nextcells[i] = self.tocellnum[new_cell]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n\n # check for inter-agent collisions:\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n while(len(collisions) != 0): # While loop needed to handle edge cases\n for i in range(len(nextcells)):\n if nextcells[i] in collisions:\n nextcells[i] = self.agents[i].state # agent collided with another, so no movement\n\n\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n\n\n for i in range(self.n_agents):\n if nextcells[i] == self.agents[i].state: # A collision happened for this agent\n rewards[i] += self.collision_penalty\n else:\n s = nextcells[i] # movement is valid\n self.agents[i].state = s\n if s in self.goals and s not in self.discovered_goals:\n rewards[i] += self.goal_reward\n self.discovered_goals.append(s)\n #rewards[i] += broadcasts[i]*self.broadcast_penalty\n\n\n self.currstate = tuple(nextcells)\n\n\n\n reward = np.sum(rewards)\n\n self.step_count += 1\n\n\n # If all goals were discovered, end episode\n done = 
len(self.discovered_goals) == len(self.goals)\n\n \n return reward, self.currstate, done, None", "def move(self):\n if self.ycor() > 280: self.y_dir = -1 # Set vertical movement to down if ball at top of screen\n if self.xcor() > 380: self.x_dir = -1 # Set horizontal movement to left if ball at right of screen\n if self.xcor() < -380: self.x_dir = 1 # Set horizontal movement to right if ball at left of screen\n new_x = self.xcor() + self.x_dir * 2 # Define 2 spaces forward in set horizontal dir of travel\n new_y = self.ycor() + self.y_dir * 2 # Define 2 spaces forward in set vertical dir of travel\n self.goto(new_x, new_y) # Move ball to newly defined position" ]
[ "0.6865788", "0.68375564", "0.68262863", "0.681845", "0.67499447", "0.6709733", "0.6702826", "0.6600874", "0.6599175", "0.65985924", "0.6597936", "0.65388536", "0.65373313", "0.65184283", "0.6489433", "0.6484609", "0.6464075", "0.6452269", "0.64365", "0.6379417", "0.6379417", "0.6353324", "0.63322055", "0.6306298", "0.6227514", "0.62191725", "0.62111586", "0.6204539", "0.6197145", "0.617746" ]
0.7721951
0
Perform the QLearning algorithm until convergence of self.Q
def algorithm(self): convergence_threshold = 50 reward_num_threshold = 300 alpha = 1 gamma = 0.5 while (self.reward_num < reward_num_threshold) and (self.count<convergence_threshold): print('------') print('Iteration', self.reward_num, '/', reward_num_threshold) print('Iterations w/out Q-update:', self.count, '/', convergence_threshold) # select a possible action (any of them; all are valid) s = self.get_state_num() print("Initial state:", s) a = random.choice(np.arange(3)) self.apply_action(a) while self.reward == None: #print("Sleeping to wait for reward") rospy.sleep(0.5) reward = self.reward print("REWARD =", reward) self.reward = None if reward == 0: next_state = self.get_state_num() mx = np.amax(self.Q[next_state]) else: ## There is no next state if nonzero reward seen mx = 0 update = self.Q[s][a] + alpha*(reward+gamma*mx-self.Q[s][a]) if self.Q[s][a] != update: print("Update Q matrix") self.Q[s][a] = update self.count = 0 else: self.count += 1 print("Finished calculating Q-Matrix\n\n\n\n\n\n\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qlearning(env, iterations=1000, gamma=0.9, alpha=0.1):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n curr_state = s_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---Q Learning---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * (Q_value[new_state, np.argmax(Q_value[new_state])]) - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA \n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def buildQ(self):\r\n\r\n print 'Building Q ...'\r\n\r\n self.y = T.matrix('y')\r\n\r\n mlp = MLP(activations=self.hyper['q_activs'],\r\n dims=self.hyper['q_dims'],\r\n weights_init=self.hyper['q_W_init'],\r\n biases_init=Constant(0))\r\n\r\n q_parameters = mlp.apply(self.y)\r\n mlp.initialize()\r\n\r\n # self.qxgy_mu.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_mu = q_parameters[:,:self.hyper['x_dim']]\r\n\r\n # self.qxgy_var.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_var = T.exp( q_parameters[:,self.hyper['x_dim']:2*self.hyper['x_dim']] )\r\n\r\n # self.qwgy_mu.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_mu = q_parameters[:,2*self.hyper['x_dim']:2*self.hyper['x_dim']+self.hyper['w_dim']]\r\n\r\n # self.qwgy_var.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_var = T.exp( q_parameters[:,2*self.hyper['x_dim']+self.hyper['w_dim']:] )\r\n\r\n\r\n #---Will be useful to compute samples from q(x|y)---#\r\n #self.eps_x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.eps_x = self.srng.normal((self.qxgy_mu.shape[0] ,self.hyper['L_x'] ,self.hyper['x_dim']))\r\n\r\n #self.x corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.x = self.qxgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qxgy_var).dimshuffle(0,'x',1)*self.eps_x\r\n\r\n #---Will be useful to compute samples 
from q(w|y)---#\r\n #self.eps_w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.eps_w = self.srng.normal((self.qwgy_mu.shape[0] ,self.hyper['L_w'] ,self.hyper['w_dim']))\r\n\r\n #self.w corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.w = self.qwgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qwgy_var).dimshuffle(0,'x',1)*self.eps_w\r\n\r\n\r\n #---Building the log density q(x|y)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x - self.qxgy_mu.dimshuffle(0,'x',1))**2/(2*self.qxgy_var.dimshuffle(0,'x',1)), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(self.qxgy_var), axis=1))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n qxgy = norm_cst.dimshuffle(0,'x')*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_qxgy = T.log(qxgy + little_num)", "def learn(self):\n if self.learn_step_counter % self.target_q_update_step == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict()) #update target_net's parameters\n logging.info(\"updtate target q\")\n self.learn_step_counter += 1\n\n rgbs,depths, rgbs_1, depths_1,questions,actions,rewards,terminals = self.memory.sample()\n\n rgbs_var = Variable(torch.FloatTensor(rgbs).cuda())\n depths_var = Variable(torch.FloatTensor(depths).cuda())\n rgbs_1_var = Variable(torch.FloatTensor(rgbs_1).cuda())\n depths_1_var = Variable(torch.FloatTensor(depths_1).cuda())\n questions_var = Variable(torch.LongTensor(questions).cuda())\n actions_var = Variable(torch.LongTensor(actions).cuda())\n rewards_var = Variable(torch.FloatTensor(rewards).cuda())\n terminals_var = Variable(torch.FloatTensor(terminals).cuda())\n\n q_eval_matrix = self.eval_net(rgbs_var,depths_var,questions_var)\n q_eval_matrix = q_eval_matrix.view(-1,9*28*28)\n actions_var = actions_var.view(-1,1)\n q_eval = torch.gather(q_eval_matrix, 1, actions_var) \n q_eval = q_eval.squeeze(1)\n\n q_next_matrix = self.target_net(rgbs_1_var,depths_1_var,questions_var).detach() #don't backward\n q_next_matrix = q_next_matrix.view(-1,9*28*28)\n q_next = torch.max(q_next_matrix,1)[0]\n\n one_var = Variable(torch.ones_like(terminals_var))\n\n q_target = rewards_var + (one_var- terminals_var)*self.discount * q_next\n \n loss = self.loss_func(q_eval, q_target)\n\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.task_total_loss += loss.item()\n self.task_total_q += q_target.mean()\n self.update_count += 1", "def learn(self):\n \n # target parameter update\n # target parameter update\n if self.learn_step_counter % self.nu_iter == 0:\n self.target_net.load_state_dict(self.eval_net.state_dict())\n #testing the preformace of the network\n if self.learn_step_counter == 0:\n print('As referece this first test on dev data. Is maded with the Q networks, initialized randomly : ' )\n else:\n print(\"\\n Lets copy the Q-value Net in to Q-target net!. 
And test the performace on the dev data: \")\n \n current_bleu = self.dev_network()\n print(\"Current Bleu score is: \", current_bleu)\n \n self.learn_step_counter += 1\n\n \n long_Batch = self.sample_size*3\n # Sampling the higgest rewards values\n b_memory_big = self.memory[np.argsort(-self.memory[:-self.max_output_length, self.state_size+1])][:long_Batch]\n \n sample_index = np.random.choice(long_Batch, self.sample_size)\n b_memory = b_memory_big[sample_index, :]\n\n b_s = torch.FloatTensor(b_memory[:, :self.state_size])\n b_a = torch.LongTensor(b_memory[:, self.state_size:self.state_size+1].astype(int))\n b_r = torch.FloatTensor(b_memory[:, self.state_size+1:self.state_size+2])\n b_s_ = torch.FloatTensor(b_memory[:, self.state_size+2: self.state_size+2 + self.state_size])\n\n b_is_eos = torch.FloatTensor(b_memory[:, self.size_memory1-1:]).view(self.sample_size, 1)\n #print(b_a, b_a.size)\n #print(b_is_eos)\n #Activate the eval_net\n unfreeze_model(self.eval_net)\n \n # q_eval w.r.t the action in experience\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n #taking the most likely action.\n b_a_ = torch.LongTensor(q_next.max(1)[1].view(self.sample_size, 1).long())\n #b_a_ = q_next.max(1)[0].view(self.sample_size, 1).long() # shape (batch, 1)\n q_eval_next = self.eval_net(b_s_).gather(1, b_a_) # shape (batch, 1)\n \n #If eos q_target = reward. \n q_target = b_r + self.gamma * b_is_eos* q_eval_next.view(self.sample_size, 1) # shape (batch, 1)\n #version 0\n #q_target = b_r + self.gamma * q_next.max(1)[0].view(self.sample_size, 1) # shape (batch, 1)\n \n loss = self.loss_func(q_eval, q_target)\n \n self.tb_writer.add_scalar(\"learn/learn_batch_loss\",\n loss.data, self.learn_step_counter)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n #desctivate the eval_net\n freeze_model(self.eval_net)", "def initQ(\n self, env, warmupIter, outFolder, num_warmup_samples=200, vmin=-1,\n vmax=1, plotFigure=True, storeFigure=True\n ):\n lossList = []\n for iterIdx in range(warmupIter):\n print(\"\\rWarmup Q [{:d}]\".format(iterIdx + 1), end=\"\")\n states, heuristic_v = env.get_warmup_examples(\n num_warmup_samples=num_warmup_samples\n )\n\n self.Q_network.train()\n heuristic_v = torch.from_numpy(heuristic_v).float().to(self.device)\n states = torch.from_numpy(states).float().to(self.device)\n v = self.Q_network(states)\n loss = smooth_l1_loss(input=v, target=heuristic_v)\n lossList.append(loss.data.cpu().numpy())\n\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.Q_network.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n if plotFigure or storeFigure:\n self.Q_network.eval()\n env.visualize(self.Q_network, vmin=vmin, vmax=vmax, cmap=\"seismic\")\n if storeFigure:\n figureFolder = os.path.join(outFolder, \"figure\")\n os.makedirs(figureFolder, exist_ok=True)\n figurePath = os.path.join(figureFolder, \"initQ.png\")\n plt.savefig(figurePath)\n if plotFigure:\n plt.show()\n plt.pause(0.001)\n plt.close()\n self.target_network.load_state_dict(\n self.Q_network.state_dict()\n ) # hard replace\n self.build_optimizer()\n lossList = np.array(lossList)\n print(\"\\n => Warmup Q Ends\")\n return lossList", "def _transition_q_learning(self):\n if self.state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(self.state, self.qstore.q)\n\n action_values = self.qstore.q[self.state.as_tuple()]\n # epsilon greedy choice\n if np.random.random() < 
self.epsilon:\n action = State(*action_values['actions'][np.random.randint(len(action_values['actions']))])\n else:\n max_q_value = max(action_values['utilities'])\n max_q_indexes = [i for i in range(len(action_values['actions'])) if\n action_values['utilities'][i] == max_q_value]\n max_actions = [action_values['actions'][i] for i in max_q_indexes]\n action = State(*max_actions[np.random.randint(len(max_actions))])\n\n self.state = action.copy()\n\n self._post_transition_updates()", "def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"", "def perform_q_learning(self, prev_state: str, state: str, action: Action, reward):\n max_future_q = max(self.Qs[state].values())\n self.Qs[prev_state][action] = (1.0 - self.alpha) * self.Qs[prev_state][\n action] + self.alpha * (reward + self.gamma * max_future_q)\n self.Vs[prev_state] = max(self.Qs[prev_state].values())", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of rewards from reward list\n ## also obtain final gamma multiplier\n reduced_rewards, gamma_multipliers = self.reduce_rewards(rewards)\n \n ## convert to tensors\n states = np_to_tensor(states)\n actions = np_to_tensor(actions)\n 
reduced_rewards = np_to_tensor(reduced_rewards)\n gamma_multipliers = np_to_tensor(gamma_multipliers)\n next_states = np_to_tensor(next_states)\n dones = np_to_tensor(dones)\n weights = np_to_tensor(np.array(weights))\n \n #### Updating Qnet\n \n ## actions from the target actor network\n greedy_actions = self.actor_target(next_states)\n ## compute temporal difference\n targets = reduced_rewards + torch.mul( torch.mul(gamma_multipliers , self.QNetwork_target(next_states, greedy_actions)) , (1-dones).unsqueeze(1))\n Q_sa = self.QNetwork_local(states, actions)\n \n td_error = targets - Q_sa\n \n ## update the priorities using temporal differences\n self.buffer.update_priority(sample_inds,\n (td_error).detach().abs().squeeze().cpu().data.numpy()+REPLAY_EPS)\n \n ## compute the loss, importance sampling weights are used\n loss = ((td_error).pow(2)*weights).mean()\n \n self.QNet_optim.zero_grad()\n loss.backward()\n self.QNet_optim.step()\n \n ### Updating Actor\n pred_actions = self.actor_local(states)\n actor_loss = - self.QNetwork_local(states, pred_actions).mean()\n \n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n \n #### Polyak Updates\n self.soft_update(self.QNetwork_local, self.QNetwork_target, TAU)\n self.soft_update(self.actor_local, self.actor_target, TAU)", "def q_learning(env, learning, discount, epsilon, min_eps, episodes):\n # [18.00000072 14.00000006]\n num_states = (env.observation_space.high - env.observation_space.low) * \\\n np.array([10, 100]) # >> [18.00000072 14.00000006]\n num_states = np.round(num_states, 0).astype(int) + 1 # >> [19 15]\n\n # Initialize Q table\n # env.action_space.n return the number of action that our agent can make (here 3, left, cease, right)\n Q = np.random.uniform(low=-1, high=1, size=(num_states[0], num_states[1], env.action_space.n))\n\n # Initialize variable to track rewards\n reward_list = []\n ave_reward_list = []\n\n # Calculate episodic reduction in epsilon\n reduction = (epsilon - min_eps) / (episodes / 2)\n\n for i in range(episodes):\n # Initialize parameters\n done = False\n tot_reward, reward = 0, 0\n state = env.reset()\n\n # Discretize state\n state_adj = adjust_state(state)\n\n while done != True:\n # Render env for last five eps\n if i >= (episodes - 20):\n env.render()\n\n # Determine next action - epsilon greedy strategy\n if np.random.random() < 1 - epsilon:\n action = np.argmax(Q[state_adj[0], state_adj[1]])\n else:\n action = np.random.randint(0, env.action_space.n)\n\n # Get next state and reward\n state2, reward, done, info = env.step(action)\n\n # Discretize state2\n state2_adj = adjust_state(state2)\n\n # Allow for terminal states // .5 on env_space[0] represent the flag position\n if done and state2[0] >= .5:\n Q[state_adj[0], state_adj[1], action] = reward\n\n # adjust Q value for current state\n else:\n '''work on this, it's complicated but far from non-understandable'''\n delta = learning*(reward + discount*np.max(Q[state2_adj[0], state2_adj[1]]) -\n Q[state_adj[0], state_adj[1], action])\n Q[state_adj[0], state_adj[1], action] += delta\n\n tot_reward += reward\n state_adj = state2_adj\n\n # Decay epsilon\n if epsilon > min_eps:\n epsilon -= reduction\n\n # Track rewards\n reward_list.append(tot_reward)\n\n if (i+1) % 100 == 0:\n ave_reward = np.mean(reward_list)\n ave_reward_list.append(ave_reward)\n reward_list = []\n print(f'Episode {i+1} Average Reward: {ave_reward}')\n\n env.close()\n\n return ave_reward_list", "def q_update(self):\n\n # exit if the experience buffer is not yet 
large enough\n if self.experience_buffer.size < self.batch_size:\n return\n \n # get the random batch\n states, action_indices, rewards, not_terminals, succ_states, succ_players, succ_legal_moves = self.experience_buffer.random_batch(self.batch_size)\n states = states.to(Globals.device)\n action_indices = action_indices.to(Globals.device)\n rewards = rewards.to(Globals.device)\n not_terminals = not_terminals.to(Globals.device)\n succ_states = succ_states.to(Globals.device)\n succ_players = succ_players.to(Globals.device)\n\n # prepare the training data\n q_values = self.target_network(succ_states)\n target = torch.empty(1, self.batch_size)\n for i in range(self.batch_size):\n if not_terminals[i] == 0:\n target[0, i] = rewards[i]\n continue\n\n if succ_players[i] == CONST.WHITE_MOVE:\n legal_q_values = q_values[0, 0:9][succ_legal_moves[i]]\n q_value, _ = legal_q_values.max(0)\n else:\n legal_q_values = q_values[0, 9:18][succ_legal_moves[i]]\n q_value, _ = legal_q_values.min(0)\n\n target[0, i] = rewards[i] + self.disc*not_terminals[i]*q_value\n\n # execute the training step of the network\n self.training_network.train_step(states, target, action_indices) # the eligibility trace is used as td target", "def update(self, state, action, nextState, reward):\n \"\"\"Description:\n Use Q-Learning algoritm in slide 58 of MDP\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n maxQns = self.getValue(nextState) # get max q-value of next state\n if maxQns == None:\n maxQns = 0\n Qsa = self.getQValue(state, action) #self.qValues[(state, action)]\n difference = reward + self.discountRate * maxQns - Qsa\n self.qValues[(state, action)] += self.alpha * difference\n \n self.vitCount[(state, action)] += 1\n \"\"\" END CODE \"\"\"", "def Q_learning_train(env,alpha,gamma,epsilon,episodes):\n %time\n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n \n \n print(\"Training finished.\\n\")\n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened training reward per episode\", pad = 30, size = BIGGER_SIZE)\n plt.legend()\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major');\n plt.tick_params(axis='both', which='minor');\n #plt.xlim(0, 60000);\n #plt.ylim(0,50)\n #plt.xticks(np.arange(0, episodes+1, 5000));\n #plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def update_q_values(self, state, action, next_state, reward, done):\n # 
Following the Q-Learning update rule\n if done:\n self.Q_values[state,action] = (1 - self.alpha) * self.Q_values[state,action] + self.alpha * (reward)\n else:\n self.Q_values[state,action] = (1 - self.alpha) * self.Q_values[state,action] + self.alpha * (reward + self.gamma * np.amax(self.Q_values[next_state]))", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", "def qlearn(self, num_simulations):\n initial_maze_loc = self.maze.location\n for i in range(num_simulations):\n curr_coord = self.maze.location\n new_epsilon = round(1 - (i+1)/num_simulations, 2)\n self.epsilon = new_epsilon if new_epsilon > 0 else self.epsilon\n\n while (self.grid[curr_coord[0]][curr_coord[1]] != 'G' and\n self.grid[curr_coord[0]][curr_coord[1]] != 'E'):\n rand_num = round(random.random(), 2)\n\n move = (0,0)\n if rand_num < self.epsilon: # exploration\n move = random.choice(self.maze.moves())\n else: # exploitation\n possible_moves = self.maze.moves()\n best_next_move_q = 0\n for pmove in possible_moves:\n if (self.qtable[curr_coord[0]+pmove[0]][curr_coord[1]+pmove[1]] >=\n best_next_move_q):\n move = pmove\n best_next_move_q = (\n self.qtable[curr_coord[0]+pmove[0]][curr_coord[1]+pmove[1]])\n\n self.q(curr_coord, move)\n curr_coord = (curr_coord[0]+move[0], curr_coord[1]+move[1])\n self.maze.location = curr_coord\n self.maze.location = 
initial_maze_loc\n #print(f\"Simulation {i+1} of {num_simulations} complete.\")", "def learn(self, state, action, reward, next_state):\r\n\r\n \"\"\"Please Fill Your Code Here.\r\n \"\"\"\r\n self.Q[state][action] = self.Q[state][action] + self.alpha * (reward + self.gamma * max(self.Q[next_state]) - self.Q[state][action])\r\n\r\n return 0", "def inverse_q_learning(feature_matrix,nA, gamma, transitions, alpha_r, alpha_q, alpha_sh, epochs, real_distribution):\n nS = feature_matrix.shape[0]\n\n \n # initialize tables for reward function, value functions and state-action visitation counter.\n r = np.zeros((nS, nA))\n q = np.zeros((nS, nA))\n q_sh = np.zeros((nS, nA))\n state_action_visitation = np.zeros((nS, nA))\n\n for i in range(epochs):\n if i%10 == 0:\n print(\"Epoch %s/%s\" %(i+1, epochs))\n \n for traj in transitions:\n for (s, a, _, ns) in traj:\n state_action_visitation[s][a] += 1\n d = False # no terminal state\n\n # compute shifted q-function.\n q_sh[s, a] = (1-alpha_sh) * q_sh[s, a] + alpha_sh * (gamma * (1-d) * np.max(q[ns]))\n \n # compute log probabilities.\n sum_of_state_visitations = np.sum(state_action_visitation[s])\n log_prob = np.log((state_action_visitation[s]/sum_of_state_visitations) + epsilon)\n \n # compute eta_a and eta_b for Eq. (9).\n eta_a = log_prob[a] - q_sh[s][a]\n other_actions = [oa for oa in range(nA) if oa != a]\n eta_b = log_prob[other_actions] - q_sh[s][other_actions]\n sum_oa = (1/(nA-1)) * np.sum(r[s][other_actions] - eta_b)\n\n # update reward-function.\n r[s][a] = (1-alpha_r) * r[s][a] + alpha_r * (eta_a + sum_oa)\n\n # update value-function.\n q[s, a] = (1-alpha_q) * q[s, a] + alpha_q * (r[s, a] + gamma * (1-d) * np.max(q[ns]))\n s = ns\n\n # compute Boltzmann distribution.\n boltzman_distribution = []\n for s in range(nS):\n boltzman_distribution.append([])\n for a in range(nA):\n boltzman_distribution[-1].append(np.exp(q[s][a]))\n boltzman_distribution = np.array(boltzman_distribution)\n boltzman_distribution /= np.sum(boltzman_distribution, axis=1).reshape(-1, 1)\n return q, r, boltzman_distribution", "def Q_learning_test(env,alpha,gamma,episodes, q_table):\n %time\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n total_reward = 0\n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n \n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n next_state, reward, done, info = env.step(action) \n\n\n if reward == -10:\n penalties += 1\n \n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n \n total_reward += reward\n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n\n \n print(\"Training finished.\\n\")\n \n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened testing reward per episode\", pad = 30 , size = BIGGER_SIZE)\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major', labelsize=16);\n plt.tick_params(axis='both', which='minor', labelsize=16);\n #plt.xlim(100000, 200000);\n #plt.ylim(0,50)\n # plt.xticks(np.arange(0, episodes+1, 5000));\n # plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def batch_q_learning(self):\n\n if(self.memory.get_usage() > Parameters.AGENT_HISTORY_LENGTH):\n\n state_t, action, 
reward, state_t_plus_1, terminal, i_s_weights, memory_indices = self.memory.bring_back_memories()\n\n q_t_plus_1 = self.tf_session.run(\n self.target_dqn.q_values, {\n self.target_dqn_input: state_t_plus_1})\n max_q_t_plus_1 = np.max(q_t_plus_1, axis=1)\n\n target_q_t = (1. - terminal) * \\\n Parameters.DISCOUNT_FACTOR * max_q_t_plus_1 + reward\n\n _, q_t, losses = self.tf_session.run([self.dqn.optimize, self.dqn.q_values, self.dqn.errors],\n {\n self.dqn.target_q: target_q_t,\n self.dqn.action: action,\n self.dqn_input: state_t,\n self.dqn.i_s_weights: i_s_weights\n })\n\n self.memory.update(\n memory_indices,\n np.squeeze(q_t),\n losses,\n self.get_learning_completion())\n input_shape = (1, Parameters.IMAGE_HEIGHT, Parameters.IMAGE_WIDTH, Parameters.AGENT_HISTORY_LENGTH)\n dqn_input = self.environment.get_input().reshape(input_shape)\n q_values = self.tf_session.run(\n self.dqn.q_values, {\n self.dqn_input: dqn_input})\n Plotter.add_q_values_at_t(q_values)\n else:\n print('[WARNING] Not enough memory for a batch')", "def learn(self):\n batch = self.agent.replay_buffer.sample(self.batch_size)\n states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device) # shape == (batch_size, 3, 6, 7)\n actions = [x.action for x in batch]\n rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)\n next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)\n dones = [x.done for x in batch]\n\n self.optimizer.zero_grad()\n\n\n q_vals = self.agent.policy_net(states)[range(len(actions)), actions] # Q vals for actions taken\n q_next_vals = self.agent.target_net(next_states).detach() # we don't care about grad wrt target net\n q_next_vals[dones] = 0.0 # terminal states have no future expected value\n q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]\n\n # all_q_vals = self.agent.policy_net(states)\n # print()\n # print('actions')\n # print(actions)\n # print()\n # print('original all q vals')\n # print(self.agent.policy_net(states)) \n # print(self.agent.policy_net(states).shape)\n # print()\n # print('QVALS:', q_vals)\n # print(q_vals.shape)\n # print('\\n\\n')\n # print('QTARGETS:', q_targets)\n # print(q_targets.shape)\n\n # breakpoint()\n\n loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)\n loss.backward()\n \n # for layer in self.agent.policy_net.named_parameters():\n \n # # print(f'layer: {layer[0]}')\n # # print(f'grad:', layer[1].grad)\n\n # # print('loss', loss)\n # # print('q_vals grad:', q_vals.grad)\n # # print('states:', )\n\n self.optimizer.step()\n\n self.agent.learning_iters += 1\n if self.agent.learning_iters % self.target_update_freq == 0:\n self.agent.update_target_net()\n # logger.info('Updated target net')", "def _update_q_value(self, start_state, to_state, reward, iteration):\n if start_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(start_state, self.qstore.q)\n if to_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(to_state, self.qstore.q)\n\n actions = self.qstore.q[start_state.as_tuple()]['actions']\n values = self.qstore.q[start_state.as_tuple()]['utilities']\n\n max_over_next_states = max(self.qstore.q[to_state.as_tuple()]['utilities']) if to_state.terminate != 1 else 0\n\n action_between_states = to_state.as_tuple()\n\n action_index = actions.index(action_between_states)\n learning_rate_alpha = 1 / (iteration ** self.state_space_parameters.learning_rate_omega)\n\n # Q_Learning update rule\n 
values[action_index] = ( # Q_t+1(s_i,𝑢) =\n values[action_index] + # Q_t(s_i,𝑢)\n learning_rate_alpha * ( # α\n reward # r_t\n + self.state_space_parameters.discount_factor # γ\n * max_over_next_states # max_{𝑢'∈ 𝒰(s_j)} Q_t(s_j,𝑢')\n - values[action_index] # -Q_t(s_i,𝑢)\n )\n )\n\n self.qstore.q[start_state.as_tuple()] = {'actions': actions, 'utilities': values}", "def q_learning(env, model, episodes, gamma=0.9,\n epsilon=0.3, eps_decay=0.99,\n replay=False, replay_size=20,\n title='DQL', double=False,\n n_update=10, soft=False, verbose=True):\n final = []\n memory = []\n episode_i = 0\n sum_total_replay_time = 0\n for episode in range(episodes):\n episode_i += 1\n if double and not soft:\n # Update target network every n_update steps\n if episode % n_update == 0:\n model.target_update()\n if double and soft:\n model.target_update()\n\n # Reset state\n state = env.reset()\n done = False\n total = 0\n\n while not done:\n # Implement greedy search policy to explore the state space\n if random.random() < epsilon:\n action = env.action_space.sample()\n else:\n q_values = model.predict(state)\n action = torch.argmax(q_values).item()\n\n # Take action and add reward to total\n next_state, reward, done, _ = env.step(action)\n\n # Update total and memory\n total += reward\n memory.append((state, action, next_state, reward, done))\n q_values = model.predict(state).tolist()\n\n if done:\n if not replay:\n q_values[action] = reward\n # Update network weights\n model.update(state, q_values)\n break\n\n if replay:\n t0 = time.time()\n # Update network weights using replay memory\n model.replay(memory, replay_size, gamma)\n t1 = time.time()\n sum_total_replay_time += (t1 - t0)\n else:\n # Update network weights using the last step only\n q_values_next = model.predict(next_state)\n q_values[action] = reward + gamma * torch.max(q_values_next).item()\n model.update(state, q_values)\n\n state = next_state\n\n # Update epsilon\n epsilon = max(epsilon * eps_decay, 0.01)\n final.append(total)\n plot_res(final, title)\n\n if verbose:\n print(\"episode: {}, total reward: {}\".format(episode_i, total))\n if replay:\n print(\"Average replay time:\", sum_total_replay_time / episode_i)\n\n return final", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones = experiences\n\n #simple implementation of a python noob to implement DDQN\n bla = torch.from_numpy(np.zeros(64)).float().to(device)\n for i in range(64):\n bla[i] = self.qnetwork_target(next_states[i]).detach()[self.qnetwork_local(next_states).detach().argmax(1)[i]]\n Q_targets_next = bla.unsqueeze(1)\n #this was my first try of ddqn in python style, but as i said i'm a noob and didn't get it working\n #Q_targets_next = [self.qnetwork_target(next_states).detach()[i] for i in self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)]\n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)", "def learn(self):\n Qsa = self.evalQsa(self.features)[self.chosenA]\n print Qsa\n dQ = self.alpha*(self.reward + self.gamma * self.maxQsa(self.next_features) - Qsa)\n self.thetas += 
dQ*self.features[:,self.chosenA]\n print self.thetas\n # self.thetas /= np.sqrt(np.sum(np.power(self.thetas,2)))", "def q_learn(self, trainingtime):\r\n def initLists():\r\n return [0 for i in range(len(self.game.combined_playbook)+1)]\r\n q_table = collections.defaultdict(initLists)\r\n random.seed()\r\n self.train(trainingtime, q_table)\r\n\r\n self.q_table = q_table\r\n game = self.game\r\n def func(state, cards, index, junk):\r\n encoded_state= tuple(game.bin_encoded_state(game.encode_full_state(state)))\r\n best = 0\r\n currmax = -10\r\n valid_moves = game.valid_moves(state)\r\n for i in range(len(valid_moves)):\r\n if q_table[encoded_state][valid_moves[i]] > currmax:\r\n best = i\r\n currmax = q_table[encoded_state][valid_moves[i]]\r\n return valid_moves[best] \r\n\r\n return func", "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, weights, indexes = experiences\n\n q_expected, q_targets = self.get_target_and_expected(states, \n actions, \n rewards, \n next_states, \n dones, \n gamma)\n\n #print('q_expected.shape', q_expected.shape)\n #print('q_targets.shape', q_targets.shape)\n \n # Compute loss\n ##### deltas = F.mse_loss(q_expected, q_targets)\n deltas = q_expected - q_targets\n #print('loss.shape', loss.data.cpu().numpy().shape)\n #print('loss', loss)\n \n _sampling_weights = (torch.Tensor(weights)\n .view((-1, 1)))\n \n # mean square error\n loss = torch.mean((deltas * _sampling_weights)**2)\n\n # importance sampling weights used to correct bias introduced \n # by prioritisation experience replay\n # See Annealing the bias https://arxiv.org/abs/1511.05952\n #with torch.no_grad():\n # weight = sum(np.multiply(weights, loss.data.cpu().numpy()))\n # print('weight', weight)\n # loss *= weight\n # print('weights.shape', weights.shape)\n # print('loss type', type(loss))\n # print('loss shape', loss.size())\n # loss *= weights\n # Minimize the loss\n # call zero_grad before calling backward() \n # o.w. 
gradients are accumulated from multiple passes\n self.optimizer.zero_grad()\n # backward computes dloss/dx for every parameter x\n loss.backward()\n # updates parameters\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n \n # ------------------- update priorities ------------------- # \n priorities = abs(deltas.detach()).numpy()\n #priorities = abs(q_expected.detach() - q_targets.detach()).numpy()\n self.memory.update_priorities(priorities, indexes)", "def qUpdate(self,state,action,reward,next_state):\r\n #add to experience\r\n if next_state != \"end\":\r\n self.experience.append([self.feat_funct(state),action,\r\n reward,self.feat_funct(next_state)])\r\n else:\r\n self.experience.append([self.feat_funct(state),action,\r\n reward,next_state]) \r\n #print(state,action,reward,next_state)\r\n #get minibatch\r\n sample = np.random.randint(0,len(self.experience),self.batch_size)\r\n d = np.zeros((self.batch_size,self.size))\r\n y = np.zeros((self.batch_size,1))\r\n for i,row in enumerate(sample):\r\n state,action,reward,next_state = self.experience[row]\r\n #get feature vector\r\n d[i,:] = self.comb_feat_action(state,action)\r\n #get target\r\n #check if end of episoe\r\n if next_state == 'end':\r\n y[i] = reward\r\n else:\r\n y[i] = reward + self.gamma * self.maxOldQ(next_state)\r\n #print(row,next_state)\r\n #print(self.maxOldQ(next_state))\r\n \r\n loss = self.train(d,y)\r\n \r\n #update old learner if greater than num_update\r\n if self.iteration % self.num_update == 0:\r\n self.old_learn = copy.deepcopy(self.learner)\r\n self.iteration += 1\r\n \r\n return(loss)", "def _learn(self, state):\n p_state, p_action = self.prev\n if p_state is None:\n return\n self.Q[p_state][p_action] = self.learning_rate * (self.R(state) + self.discount * max(self.Q[state].values())) - self.Q[p_state][p_action]", "def update (self):\n\t\tidx = self.idx\n\t\tC = self.C[idx]\t\t# choice\n\t\tPE = self.PE[idx]\t# choice PE\n\t\talpha = self.alpha\t# learning rate\n\n\t\t# don't need to update anything for UCB\n\t\tif self.UCB_samplemean:\n\t\t\treturn\n\n\t\tif not self.gamble:\n\t\t\t# carry over values for the unselected options\n\t\t\tself.Q[idx+1,:] = self.Q[idx,:]\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1,C] = self.Q[idx,C] + alpha[1]*PE\n\n\t\telse:\n\t\t\t# check if two learning rates (pos/neg)\n\t\t\t# PE = 0 if gamble isn't chosen\n\t\t\tif isinstance(alpha,float):\n\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha*PE\n\t\t\telse:\n\t\t\t\tif PE > 0:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[0]*PE\n\t\t\t\telse:\n\t\t\t\t\tself.Q[idx+1] = self.Q[idx] + alpha[1]*PE" ]
[ "0.7476597", "0.7048921", "0.6965554", "0.68989587", "0.6872403", "0.6778621", "0.668104", "0.6651404", "0.6628057", "0.6619447", "0.65897125", "0.6540257", "0.65138745", "0.650373", "0.6488787", "0.64733046", "0.6472302", "0.6450761", "0.6444047", "0.6443331", "0.6420884", "0.6325316", "0.63097507", "0.62979627", "0.62914497", "0.62546164", "0.6242289", "0.62403274", "0.6238313", "0.6235598" ]
0.7540043
0
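The Q-learning routine in the record above applies the standard tabular update Q[s][a] += alpha*(reward + gamma*max_a' Q[s'][a'] - Q[s][a]) and stops once the table has gone a fixed number of iterations without changing. A minimal generic sketch of that loop is below; it assumes a gym-style env.reset()/env.step() interface returning (next_state, reward, done) rather than the ROS callbacks used in the record, and the default hyperparameters are illustrative only.

import numpy as np

def q_learning(env, n_states, n_actions, alpha=0.1, gamma=0.5,
               epsilon=0.1, patience=50, max_steps=10000):
    # Q-table indexed by (state, action)
    Q = np.zeros((n_states, n_actions))
    unchanged = 0                      # consecutive steps without a Q update
    s = env.reset()
    for _ in range(max_steps):
        # epsilon-greedy action selection
        if np.random.random() < epsilon:
            a = np.random.randint(n_actions)
        else:
            a = int(np.argmax(Q[s]))
        s_next, r, done = env.step(a)  # assumed gym-style interface
        target = r if done else r + gamma * np.max(Q[s_next])
        new_q = Q[s, a] + alpha * (target - Q[s, a])
        if new_q != Q[s, a]:
            Q[s, a] = new_q
            unchanged = 0
        else:
            unchanged += 1
        if unchanged >= patience:      # crude convergence test, as in the record
            break
        s = env.reset() if done else s_next
    return Q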
Get a dictionary containing the total score for ``obj`` and the number of votes it has received. It can thus be used to rank the best-rated objects on a very simplified scale. This isn't a very good rating function right now, because an object that has received a lot of up- and downvotes mostly reflects its popularity, and only then does its score matter.
def get_score(self, obj): content_type = ContentType.objects.get_for_model(obj) result = self.filter(content_type=content_type, object_id=obj._get_pk_val()).aggregate( score=Sum('vote'), num_votes=Count('vote')) #It may happen that there has been no voting on this object so far. if result['score'] is None: result['score'] = 0 result['upvotes'] = self.get_upvotes(obj) result['downvotes'] = self.get_downvotes(obj) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_score(self, obj):\n ctype = ContentType.objects.get_for_model(obj)\n result = self.filter(object_id=obj._get_pk_val(),\n content_type=ctype).extra(\n select={\n 'score': 'COALESCE(SUM(vote), 0)',\n 'num_votes': 'COALESCE(COUNT(vote), 0)',\n }).values_list('score', 'num_votes')[0]\n\n return {\n 'score': int(result[0]),\n 'num_votes': int(result[1]),\n }", "def get_score(self, obj):\r\n query = \"\"\"\r\n SELECT SUM(vote), COUNT(vote)\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n AND object_id = %%s\"\"\" % qn(self.model._meta.db_table)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id, obj._get_pk_val()])\r\n result = cursor.fetchall()[0]\r\n # MySQL returns floats and longs respectively for these\r\n # results, so we need to convert them to ints explicitly.\r\n return {\r\n 'score': result[0] and int(result[0]) or 0,\r\n 'num_votes': int(result[1]),\r\n }", "def get_scores_in_bulk(self, objects):\n object_ids = [o._get_pk_val() for o in objects]\n if not object_ids:\n return {}\n \n ctype = ContentType.objects.get_for_model(objects[0])\n \n if supports_aggregates:\n queryset = self.filter(\n object_id__in = object_ids,\n content_type = ctype,\n ).values(\n 'object_id',\n ).annotate(\n score = CoalesceSum('vote', default='0'),\n num_votes = CoalesceCount('vote', default='0'),\n )\n else:\n queryset = self.filter(\n object_id__in = object_ids,\n content_type = ctype,\n ).extra(\n select = {\n 'score': 'COALESCE(SUM(vote), 0)',\n 'num_votes': 'COALESCE(COUNT(vote), 0)',\n }\n ).values('object_id', 'score', 'num_votes')\n queryset.query.group_by.append('object_id')\n \n vote_dict = {}\n for row in queryset:\n vote_dict[row['object_id']] = {\n 'score': int(row['score']),\n 'num_votes': int(row['num_votes']),\n }\n \n return vote_dict", "def get_scores_in_bulk(self, objects):\r\n vote_dict = {}\r\n if len(objects) > 0:\r\n query = \"\"\"\r\n SELECT object_id, SUM(vote), COUNT(vote)\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n AND object_id IN (%s)\r\n GROUP BY object_id\"\"\" % (\r\n qn(self.model._meta.db_table),\r\n ','.join(['%s'] * len(objects))\r\n )\r\n ctype = ContentType.objects.get_for_model(objects[0])\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id] + [obj._get_pk_val() \\\r\n for obj in objects])\r\n results = cursor.fetchall()\r\n vote_dict = dict([(int(object_id), {\r\n 'score': int(score),\r\n 'num_votes': int(num_votes),\r\n }) for object_id, score, num_votes in results])\r\n return vote_dict", "def score_object(cls, obj: Any) -> float:\n\n if not obj:\n return -1.0\n\n def score(value: Any) -> float:\n if isinstance(value, str):\n return 1.0\n\n if value is not None:\n return 1.5\n\n return 0.0\n\n if is_dataclass(obj):\n return sum(score(getattr(obj, var.name)) for var in fields(obj))\n\n return score(obj)", "def get_sort_data(cls, obj, **kwargs):\n prediction_results = obj.extra_data.get(\"arxiv_guessing\")\n if prediction_results:\n prediction_results = prediction_results[0].get(\"result\")\n max_score = prediction_results.get(\"max_score\")\n decision = prediction_results.get(\"decision\")\n relevance_score = max_score\n if decision == \"CORE\":\n relevance_score += 10\n elif decision == \"Rejected\":\n relevance_score = (max_score * -1) - 10\n return {\n \"max_score\": prediction_results.get(\"max_score\"),\n \"decision\": prediction_results.get(\"decision\"),\n \"relevance_score\": relevance_score\n }\n else:\n return {}", "def get_score(self):\r\n if 
self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def alt_score(objects):\n scores = {}\n for tweet in objects:\n data = tweet._json\n raw_time = datetime.strptime(\n data['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y'\n )\n age = ((datetime.utcnow() - raw_time).seconds / 60) + 1\n rt = data['retweet_count']\n fave = data['favorite_count']\n fol = data['user']['followers_count']\n weight = 1.5\n e2f = ((weight * rt + fave) / (fol / 2)) * 1000\n e2a = enagement / age\n score = e2f + e2a\n scores[score] = data['id']\n embeds = []\n for item in sorted(scores.items(), reverse=True)[:13]:\n embed = twitter.get_oembed(id=item[1], align='center')\n embeds.append(embed['html'])\n return embeds", "def get_vote_score(self):\n q = PostVote.objects.filter(post=self).aggregate(Sum('score'))\n return q['score__sum'] if q['score__sum'] else 0", "def get_upvotes(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n\n votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(), vote__exact=UPVOTE).aggregate(upvotes=Sum('vote'))\n\n if votes['upvotes'] is None:\n votes['upvotes'] = 0\n\n return votes['upvotes']", "def get_score(self):\r\n correct = 0\r\n for key in self.correct_map:\r\n try:\r\n correct += self.correct_map.get_npoints(key)\r\n except Exception:\r\n log.error('key=%s, correct_map = %s', key, self.correct_map)\r\n raise\r\n\r\n if (not self.student_answers) or len(self.student_answers) == 0:\r\n return {'score': 0,\r\n 'total': self.get_max_score()}\r\n else:\r\n return {'score': correct,\r\n 'total': self.get_max_score()}", "def votes_dict(self):\n votes = {'M': 0, 'B': 0}\n for q in self.qualities.all():\n if q.correct:\n votes['M'] += 1\n else:\n votes['B'] += 1\n return votes", "def calc_score(user_chosen_dict: dict) -> list:\n res = pmag.MagicDict()\n duty_list = user_chosen_dict['like']\n require_list = user_chosen_dict['cando']\n\n duty_cate_score = get_duty_cate_score(duty_list)\n require_post_score = get_require_post_score(require_list)\n demand_post_score = get_demand_post_score(require_list)\n\n for cate, _posts in require_post_score.items():\n cate_s = duty_cate_score.get(cate, 0)\n for post, post_s in _posts.items():\n demand_s = demand_post_score[cate][post]\n score = cate_s * 0.5 + post_s * 0.3 + demand_s * 0.2\n res[cate+\"-\"+post] = score\n sorted_res = sorted(res.items(), key=lambda x: x[1], reverse=True)\n return sorted_res", "def stats(self):\n\n labels = []\n data = []\n\n propositions = Proposition.objects.filter(poll=self)\n for proposition in propositions:\n if proposition.votes_nb() != 0:\n labels.append(proposition.label)\n data.append(proposition.votes_nb())\n\n return {'labels': labels, 'data': data}", "def get_real_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/self.votes", "def votes_dict(self):\n\n return {\n \"for\": self._yay_voters(),\n \"against\": self._nay_voters(),\n \"present\": self._present_voters(),\n \"abstain\": self._abstain_voters(),\n }", "def _calculate_vote_fractions(candidate_to_vote_count):\n total_votes = sum(candidate_to_vote_count.values()) or 1\n return {\n candidate: vote_count / total_votes\n for candidate, vote_count\n in candidate_to_vote_count.items()\n }", "def score_candidates(self,\n cand_list: 
List[Union[CandidateEntry, Tuple[str, float]]],\n query_info_obj_or_dict: Union[DataEntryFields, dict]) -> Dict[str, float]:\n query_text = self.get_query_text(query_info_obj_or_dict)\n\n if self.text_proc_obj_query is not None:\n query_text = self.text_proc_obj_query(query_text)\n\n query_text = self.handle_case(query_text)\n query_toks = query_text.split()\n query_terms_idfs = {w: self.calc_idf(w) for w in set(query_toks)}\n\n res = {}\n\n for doc_id, score in cand_list:\n doc_text = self.fwd_indx.get_doc_text(doc_id)\n if self.text_proc_obj_doc is not None:\n doc_text = self.text_proc_obj_doc(doc_text)\n doc_text = self.handle_case(doc_text)\n doc_toks = doc_text.split()\n doc_len = len(doc_toks)\n counts = Counter(doc_toks)\n score = 0\n for qterm in query_toks:\n tf = counts[qterm]\n if tf > 0:\n qidf = query_terms_idfs[qterm]\n norm_tf = (tf * (self.k1 + 1)) / \\\n (tf + self.k1 * (1 - self.b + self.b * doc_len * self.inv_avg_doc_len))\n score += qidf * norm_tf\n\n res[doc_id] = score\n\n return res", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def get_scores(self):\n precision = self.right / self.count\n APs = self.right_labels / self.count\n mAP = np.mean(APs)\n distance = self.distance / self.count\n\n return {'precision': precision,\n 'APs': APs,\n 'mAP': mAP,\n 'distance': distance\n }", "def __match_num(self, obj):\n score = 0\n for attr in self.list:\n try:\n if getattr(obj, attr) == getattr(self, attr):\n score += 1\n except AttributeError:\n pass\n return score", "def totalRating(self):\r\n result = 0\r\n for v in self.votes:\r\n result += v.voteType.weight\r\n\r\n return result", "def get_rating(self):\n if not (self.votes and self.score):\n return 0\n return float(self.score)/(self.votes+self.field.weight)", "def get_fact_score(extracted_scores,\n subj,\n obj,\n freq_dict,\n score_type='FREQ_SCORE'):\n score_types = set('FREQ_SCORE', 'MIN_SCORE')\n # Min of Page Rank scores of both Entities\n # Upweight facts where both have high scores\n min_score = min(\n extracted_scores[subj], extracted_scores[obj]\n )\n\n # Freq Score - If both entities are present - sum of frequencies\n # Upweight facts where both entities are in passage\n if subj in freq_dict and obj in freq_dict:\n freq_score = freq_dict[subj] + freq_dict[obj]\n else:\n freq_score = min(extracted_scores[subj],\n extracted_scores[obj])\n if score_type == 'FREQ_SCORE':\n return freq_score\n elif score_type == 'MIN_SCORE':\n return min_score\n else:\n ValueError(\n 'The score_type should be one of: %s' + ', '.join(list(score_types)))", "def _tmdb_score(title, min_votes=0):\n info = tmdb_info(title)\n if info:\n if min_votes and info['votes'] < min_votes:\n return 0\n return info['vote_average']\n return 0", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def generate_tweet_scores(data):\n max_rt = 0\n max_likes = 0\n rt = {}\n likes = {}\n for i in data:\n max_rt = max(data[i][\"retweet_count\"], max_rt)\n max_likes = max(data[i][\"favorite_count\"], max_likes)\n rt[i] = data[i][\"retweet_count\"]\n likes[i] = data[i][\"favorite_count\"]\n for i in data:\n if max_rt > 0:\n rt[i] = rt[i]/max_rt\n if max_likes > 0:\n likes[i] = likes[i]/max_likes\n return rt, likes", "def get_downvotes(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n\n votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(), vote__exact=DOWNVOTE).aggregate(downvotes=Sum('vote'))\n\n if 
votes['downvotes'] is None:\n votes['downvotes'] = 0\n\n return -votes['downvotes']", "def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores" ]
[ "0.81031865", "0.79285175", "0.6855811", "0.67928356", "0.6763918", "0.6228135", "0.61716664", "0.59057057", "0.5784851", "0.5742161", "0.5692605", "0.5655364", "0.5652737", "0.56520414", "0.5641303", "0.5590621", "0.5585837", "0.5560485", "0.5549761", "0.55324996", "0.55164766", "0.55124456", "0.55105066", "0.54944307", "0.54825807", "0.54145485", "0.53995883", "0.539123", "0.5375009", "0.5334779" ]
0.80732733
1
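The get_score manager method in the record above collapses every Vote row for one object into a single aggregate query. A stripped-down sketch of that pattern follows; it assumes a Django Vote model with content_type, object_id and a +1/-1 vote field, as in the record, and takes the vote queryset as an argument rather than importing a concrete model.

from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Sum

def simple_score(votes, obj):
    # votes is expected to be something like Vote.objects
    ctype = ContentType.objects.get_for_model(obj)
    result = votes.filter(
        content_type=ctype, object_id=obj.pk,
    ).aggregate(score=Sum('vote'), num_votes=Count('vote'))
    # Sum() returns None when no rows match, so normalise it to 0;
    # Count() already returns 0 in that case.
    result['score'] = result['score'] or 0
    return result

Called as simple_score(Vote.objects, some_article) this would return e.g. {'score': 3, 'num_votes': 7}; the record's version additionally fills in 'upvotes' and 'downvotes' with two further filtered aggregates.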
Record a user's vote on a given object. Only allows a given user to vote once, though that vote may be changed. A zero vote indicates that any existing vote should be removed.
def record_vote(self, obj, vote, user): if vote not in (+1, 0, -1): raise ValueError('Invalid vote (must be +1/0/-1)') content_type = ContentType.objects.get_for_model(obj) # First, try to fetch the instance of this row from DB # If that does not exist, then it is the first time we're creating it # If it does, then just update the previous one try: vote_obj = self.get(voter=user, content_type=content_type, object_id=obj._get_pk_val()) if vote == 0 and not ZERO_VOTES_ALLOWED: vote_obj.delete() else: vote_obj.vote = vote vote_obj.save() except ObjectDoesNotExist: #This is the first time we're creating it try: if not ZERO_VOTES_ALLOWED and vote == 0: # This shouldn't be happening actually return vote_obj = self.create(voter=user, content_type=content_type, object_id=obj._get_pk_val(), vote=vote) except: print(( '{file}: something went wrong in creating a vote object at {line}'.format(file=str('__FILE__'), line=str('__LINE__')))) raise ObjectDoesNotExist return vote_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_vote(self, obj, user, vote):\r\n if vote not in (+1, 0, -1):\r\n raise ValueError('Invalid vote (must be +1/0/-1)')\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n v = self.get(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val())\r\n if vote == 0:\r\n v.delete()\r\n else:\r\n v.vote = vote\r\n v.save()\r\n except models.ObjectDoesNotExist:\r\n if vote != 0:\r\n self.create(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val(), vote=vote)", "def record_vote_simple(self, obj, user, vote):#renamed from original record_vote\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n ctype = ContentType.objects.get_for_model(obj)\n try:\n v = self.get(user=user, content_type=ctype,\n object_id=obj._get_pk_val())\n if vote == 0:\n v.delete()\n else:\n v.vote = vote\n v.save()\n except models.ObjectDoesNotExist:\n if vote != 0:\n self.create(user=user, content_type=ctype,\n object_id=obj._get_pk_val(), vote=vote)", "def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()", "def add(self, score, user, ip_address, cookies={}, commit=True):\n try:\n score = int(score)\n except (ValueError, TypeError):\n raise InvalidRating(\"%s is not a valid choice for %s\" % (score, self.field.name))\n \n delete = (score == 0)\n if delete and not self.field.allow_delete:\n raise CannotDeleteVote(\"you are not allowed to delete votes for %s\" % (self.field.name,))\n # ... you're also can't delete your vote if you haven't permissions to change it. I leave this case for CannotChangeVote\n \n if score < 0 or score > self.field.range:\n raise InvalidRating(\"%s is not a valid choice for %s\" % (score, self.field.name))\n\n is_anonymous = (user is None or not user.is_authenticated())\n if is_anonymous and not self.field.allow_anonymous:\n raise AuthRequired(\"user must be a user, not '%r'\" % (user,))\n \n if is_anonymous:\n user = None\n \n defaults = dict(\n score = score,\n ip_address = ip_address,\n )\n \n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n user = user,\n )\n if not user:\n kwargs['ip_address'] = ip_address\n \n use_cookies = (self.field.allow_anonymous and self.field.use_cookies)\n if use_cookies:\n defaults['cookie'] = now().strftime('%Y%m%d%H%M%S%f') # -> md5_hexdigest?\n # TODO: move 'vote-%d.%d.%s' to settings or something\n cookie_name = 'vote-%d.%d.%s' % (kwargs['content_type'].pk, kwargs['object_id'], kwargs['key'][:6],) # -> md5_hexdigest?\n cookie = cookies.get(cookie_name) # try to get existent cookie value\n if not cookie:\n kwargs['cookie__isnull'] = True\n kwargs['cookie'] = cookie\n\n try:\n rating, created = Vote.objects.get(**kwargs), False\n except Vote.DoesNotExist:\n if delete:\n raise CannotDeleteVote(\"attempt to find and delete your vote for %s is failed\" % (self.field.name,))\n # print \"RATINGS_VOTES_PER_IP: \"\n # print getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP)\n if getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP):\n num_votes = Vote.objects.filter(\n content_type=kwargs['content_type'],\n object_id=kwargs['object_id'],\n key=kwargs['key'],\n ip_address=ip_address,\n ).count()\n if num_votes >= getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP):\n raise Exception(\"Numero Maximo de votos por ip\")\n kwargs.update(defaults)\n if use_cookies:\n # record with specified cookie was not found ...\n cookie = defaults['cookie'] # ... 
thus we need to replace old cookie (if presented) with new one\n kwargs.pop('cookie__isnull', '') # ... and remove 'cookie__isnull' (if presented) from .create()'s **kwargs\n rating, created = Vote.objects.create(**kwargs), True\n \n has_changed = False\n if not created:\n if self.field.can_change_vote:\n has_changed = True\n self.score -= rating.score\n # you can delete your vote only if you have permission to change your vote\n if not delete:\n rating.score = score\n rating.save()\n else:\n self.votes -= 1\n rating.delete()\n else:\n raise CannotChangeVote()\n else:\n has_changed = True\n self.votes += 1\n if has_changed:\n if not delete:\n self.score += rating.score\n if commit:\n self.instance.save()\n #setattr(self.instance, self.field.name, Rating(score=self.score, votes=self.votes))\n \n defaults = dict(\n score = self.score,\n votes = self.votes,\n )\n \n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n \n try:\n score, created = Score.objects.get(**kwargs), False\n except Score.DoesNotExist:\n kwargs.update(defaults)\n score, created = Score.objects.create(**kwargs), True\n \n if not created:\n score.__dict__.update(defaults)\n score.save()\n \n # return value\n adds = {}\n if use_cookies:\n adds['cookie_name'] = cookie_name\n adds['cookie'] = cookie\n if delete:\n adds['deleted'] = True\n return adds", "def vote(request, model, object_id):\n if request.method != 'POST':\n raise Http404\n\n vote_type = request.POST.get('type', None)\n if vote_type == 'up' and auth.can_vote_up(request.user):\n vote_type = Vote.VOTE_UP\n elif vote_type == 'down' and auth.can_vote_down(request.user):\n vote_type = Vote.VOTE_DOWN\n else:\n raise Http404\n\n # TODO Ensure users can't vote on their own posts\n\n obj = get_object_or_404(model, id=object_id, deleted=False, locked=False)\n content_type = ContentType.objects.get_for_model(model)\n try:\n existing_vote = Vote.objects.get(content_type=content_type,\n object_id=object_id,\n user=request.user)\n except Vote.DoesNotExist:\n existing_vote = None\n\n if existing_vote is None:\n Vote.objects.create(content_type=content_type,\n object_id=object_id,\n user=request.user,\n vote=vote_type)\n else:\n if vote_type == existing_vote.vote:\n existing_vote.delete()\n else:\n existing_vote.vote = vote_type\n existing_vote.save()\n\n # TODO Reputation management\n\n if request.is_ajax():\n return JsonResponse({\n 'success': True,\n 'score': model._default_manager.filter(\n id=object_id).values_list('score', flat=True)[0],\n })\n else:\n return HttpResponseRedirect(obj.get_absolute_url())", "def record_vote(request):\n result = \"success\"\n try:\n rating, created = Rating.objects.get_or_create(key=request.POST['id'])\n key = request.POST['id']\n ip = request.META['REMOTE_ADDR']\n event, newevent = RatingEvent.objects.get_or_create(key=key,ip=ip)\n if not newevent:\n event.is_changing = True\n event.old_value = event.value\n\n event.value = int(request.POST['vote'])\n rating.add_rating(event)\n rating.save()\n event.save()\n result = \"%s/5 rating ( %s votes)\" % (rating.avg_rating, rating.total_votes)\n except:\n transaction.rollback()\n result = 'error'\n else:\n transaction.commit()\n\n return HttpResponse(result)", "def _force_vote(self, user, value):\n previous = 0\n if value == 0:\n # Delete any previous vote object\n for v in Vote.objects.filter(user=user, content=self):\n previous = v.value\n v.delete()\n else:\n # Create or change vote object\n v, created = 
Vote.objects.get_or_create(user=user, content=self)\n previous = v.value\n v.value = value\n v.save(update_fields=['value'])\n return (previous-value)*(-1)", "def up_vote(cls, user, message):\r\n pass", "def vote(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n _logger.info(\"%s is trying to vote on %s\", request.user, context['song'])\n vote_dict = get_vote_dict(request.user)\n can_vote = context['song'].id not in vote_dict[request.user.id] and context['song'].ready\n if can_vote:\n vote = Vote()\n vote.user = request.user\n vote.song = context['song']\n vote.save()\n vote_dict[request.user.id].append(context['song'].id)\n cache.set('vote_dict', vote_dict)\n logging.info('%s voted on %s.', request.user, context['song'])\n return HttpResponse('Vote registered on %s.' % context['song'])\n else:\n logging.info('%s tried to vote more than once on %s.', request.user.username, context['song'])\n return HttpResponse(\"Du har allerede stemt på denne sangen i dag!\", content_type='text/plain', status=403)", "def up_vote(cls, user, message):\n pass", "def create_vote(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"INSERT INTO votes(user_id, answer_id, vote) VALUES(%s, %s, %s)\"\n cur.execute(query, (self.user_id, self.answer_id, self.vote_value))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True", "def update_vote(self):\n if not self.answer_id:\n return False\n try:\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n query = \"UPDATE votes SET vote=%s WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.vote_value, self.answer_id, self.user_id))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True", "def toggle_vote(self, user, value):\n try:\n v = Vote.objects.get(user=user, content=self)\n except Vote.DoesNotExist:\n Vote.objects.create(user=user, content=self, value=value)\n else:\n if v.value == value:\n v.delete()\n else:\n v.value = value\n v.save(update_fields=['value'])\n\n self.up = self.votes.count_upvotes()\n self.down = self.votes.count_downvotes()\n self.set_points()\n self.set_timepoints()\n self.save(update_fields=['up', 'down', 'points', 'timepoints'])", "def review_vote_put_handler(review_id, user):\n def fetch_params():\n placet = Parser.bool('json', 'placet')\n return placet\n review = Review.query.get_or_404(str(review_id))\n if review.is_archived is True:\n raise NotFound\n placet = fetch_params()\n if review.user_id == user.id:\n raise InvalidRequest(desc='You cannot rate your own review.')\n if user.is_vote_limit_exceeded is True and user.has_voted(review) is False:\n raise LimitExceeded('You have exceeded your limit of votes per day.')\n if placet is True and user.user_type not in review.review_class.upvote:\n raise InvalidRequest(desc='You are not allowed to upvote this review.')\n if placet is False and user.user_type not in review.review_class.downvote:\n raise InvalidRequest(desc='You are not allowed to downvote this review.')\n Vote.create(user, review, placet) # overwrites an existing vote, if needed\n return jsonify(message='Request processed successfully')", "def set_vote_for_object_parameter(obj, user, value, uuid = None, tpclass = None, name = None, comment = None, caption = None):\n t = type(obj)\n if t not in parameter_class_map:\n raise TypeError('obj has wrong type {0}'.format(t))\n\n pclass = parameter_class_map[t]['param']\n pvalclass = 
parameter_class_map[t]['val']\n pvlclass = parameter_class_map[t]['vl']\n pvoteclass = parameter_class_map[t].get('vote')\n\n if isinstance(uuid, basestring):\n q = Q(uuid=uuid) & Q(obj=obj)\n else:\n q = Q(obj=obj) & Q(tpclass=tpclass)\n if tpclass == 'user':\n if not isinstance(name, basestring):\n raise Exception('name must be string when tpclass == \"user\"')\n q &= Q(name=name)\n # get parameter\n prm = pclass.objects.filter(q).all()[0]\n if prm.enum:\n if pvlclass.objects.filter(Q(value=value) & Q(parameter=prm)).count() == 0:\n raise ValueError('this value can not be accepted')\n\n # get or create voted value\n pval = get_or_create_object(pvalclass,\n {'parameter' : prm,\n 'value' : value,\n 'status' : 'voted'},\n {'caption' : caption},\n can_change = (lambda a: False))\n # delete all other votes for values of this parameter\n pvoteclass.objects.filter(Q(voter=user) &\n Q(parameter_val__status='voted') &\n Q(parameter_val__parameter=prm)).delete()\n # create vote for our value\n vt = pvoteclass(voter=user,\n parameter_val=pval)\n if isinstance(comment, basestring):\n vt.comment = comment\n vt.save(force_insert=True)", "def addProposalVote(user_id, rc_id, vote):\n\n db = getDB()\n proposal = db.proposals.find_one({\"rc_id\": rc_id})\n db.proposal_votes.update(\n {\"user_id\": user_id, \"proposal_id\": proposal[\"_id\"]},\n {\"user_id\": user_id, \"yes_vote\": vote == \"yes\", \"proposal_id\": proposal[\"_id\"]},\n upsert=True,\n )", "def create_vote(self, data, header):\n return self.client.post(\n path='/api/v2/votes/', data=json.dumps(data), content_type='application/json', headers=header)", "def do_vote(self, stats, vote_val):\n vote = self.get_vote(stats)\n if vote is None:\n vote = Vote()\n vote.user = self\n vote.stats = stats\n vote.value = vote_val\n return vote", "def add_vote(self, source, target):\n\n if self.votes.get(source, None)==target:\n return # Don't need to change a thing.\n self.votes[source] = target\n\n qty = self.voted.get(target, 0)\n self.voted[target] = qty + 1\n pass", "def get_for_user(self, obj, user):\n if not user.is_authenticated:\n return None\n content_object = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(voter=user, content_type=content_object, object_id=obj._get_pk_val())\n\n except ObjectDoesNotExist:\n #print('No vote by {user} on {object}'.format(user=user, object=obj))\n return None\n\n return vote", "def obj_create(self, bundle, request=None, **kwargs):\n return super(VoteResource, self).obj_create(bundle, request, user=request.user)", "def vote(self, data, suffix=''): # pylint: disable=unused-argument\n # Here is where we would prevent a student from voting twice, but then\n # we couldn't click more than once in the demo!\n #\n # if self.voted:\n # log.error(\"cheater!\")\n # return\n\n votes = json.load(self.fs.open(u\"thumbsvotes.json\"))\n self.upvotes = votes['up']\n self.downvotes = votes['down']\n\n if data['voteType'] not in ('up', 'down'):\n log.error('error!')\n return\n\n if data['voteType'] == 'up':\n self.upvotes += 1\n else:\n self.downvotes += 1\n\n with self.fs.open(u'thumbsvotes.json', 'wb') as file_output:\n file_output.write(\n json.dumps({'up': self.upvotes, 'down': self.downvotes}).encode()\n )\n\n self.voted = True\n\n return {'up': self.upvotes, 'down': self.downvotes}", "def up_vote2(self, uid, pid):\n vid = self.id_generator(self.votes)\n vote = {'$setOnInsert': [{'Id' : vid}, {'PostId': pid}, {'VoteTypeId': \"2\"}, {'CreationDate': datetime.now()}]}\n if uid != '':\n 
vote['$setOnInsert'].append({'UserId':uid})\n result = self.votes.update_one({'$and':[{'UserId': uid}, {'PostId' : pid}]}, vote, upsert=True)\n if result.matched_count:\n print(\"Already voted on this post!\\n\")\n return 0\n self.posts.find_one_and_update({'PostId': pid}, {'$inc':{'Score': 1}})\n '''\n votes = self.votes.find({'UserId': uid})\n for vote in votes:\n if vote['PostId'] == pid:\n \n\n post = self.get_post(pid)\n score = post['Score']\n self.posts.update_one({'Id': pid}, {'$set': {'Score': (score+1)}})\n dic_vote = {}\n dic_vote['Id'] = self.id_generator(self.votes)\n dic_vote['PostId'] = pid\n dic_vote['VoteTypeId'] = \"2\"\n dic_vote['CreationDate'] = datetime.now()\n if uid != \"\":\n dic_vote[\"UserId\"] = uid\n self.votes.insert_one(dic_vote)\n '''", "def update_vote(name: str, party: str, votes: dict, vote_count: dict) -> bool:\r\n if auth_vote(name, party, votes):\r\n vote_count[party] += 1\r\n return True\r\n return False", "def toggle_vote(self):\n\n self.vote = 1 - self.vote", "def get_for_user(self, obj, user):\r\n if not user.is_authenticated():\r\n return None\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\r\n user=user)\r\n except models.ObjectDoesNotExist:\r\n vote = None\r\n return vote", "def add_vote():\n \n\n comment_id = request.form.get(\"comment_id\")\n voted_item = request.form.get(\"voted_item\")\n\n\n comment = Comment.query.get(int(comment_id))\n \n \n vote_check = Vote.query.filter(Vote.comment_id == int(comment_id), Vote.user_id == session['user_id']).first()\n if vote_check:\n db.session.delete(vote_check)\n db.session.commit()\n else:\n vote_added = Vote(user_id = session['user_id'], comment_id = int(comment_id), up_vote = True)\n db.session.add(vote_added)\n db.session.commit()\n\n \n \n result = {'vote': comment.vote_count(), \"comment_id\": comment_id}\n return jsonify(result)", "def add_vote():\n article_name = request.json.get('article_name','')\n username = request.json.get('username','')\n user = Participant.query.find(Participant.username == username).first_or_404()\n\n article = Article.query.filter(Article.name == article_name).first_or_404()\n group = user.group \n stack_entry = Stack.query.filter(Stack.article == exist_article & Stack.group == group).first_or_404()\n \n vote = Vote(voter = user, article = stack_entry)\n db.session.add(vote)\n db.session.commit()\n\n return f\"Vote added for {user} on {article}\", 201", "def get_for_user(self, obj, user):\n if not user.is_authenticated():\n return None\n ctype = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\n user=user)\n except models.ObjectDoesNotExist:\n vote = None\n return vote", "def has_voted(self, user):\n return user.choice_set.filter(vote=self).exists()" ]
[ "0.85912955", "0.80805624", "0.70674664", "0.68415457", "0.6664023", "0.65120435", "0.6498817", "0.648873", "0.6425544", "0.6396006", "0.6346099", "0.6249975", "0.6247208", "0.61925924", "0.61074173", "0.60815287", "0.60402983", "0.6006947", "0.59739923", "0.59494007", "0.5919922", "0.5884366", "0.58233756", "0.58174163", "0.5750016", "0.5749122", "0.5732058", "0.5714961", "0.57137865", "0.5704903" ]
0.8415383
1
Get the top N scored objects for a given model. Yields (object, score) tuples.
def get_top(self, model, limit=10, inverted=False):
    content_type = ContentType.objects.get_for_model(model)
    # Get a queryset of all the objects of the model, annotated with their scores.
    results = self.filter(content_type=content_type).values('object_id').annotate(score=Sum('vote'))
    if inverted:
        results = results.order_by('score')
    else:
        results = results.order_by('-score')
    # We now have an iterable of objects of the requested model and their respective scores.
    # Use in_bulk() to avoid O(limit) db hits.
    class_name = content_type.model_class()
    objects = class_name.objects.in_bulk([item['object_id'] for item in results[:limit]])
    # Yield each (object, score) pair. Because of the lazy nature of generic
    # relations, missing objects are silently ignored.
    for item in results[:limit]:
        id, score = item['object_id'], item['score']
        if not score:
            continue
        if int(id) in objects:
            yield objects[int(id)], int(score)
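For orientation, a minimal usage sketch of this manager method, assuming it lives on a `Vote.objects` manager and that `Link`, `myapp`, and `votes` are hypothetical placeholder names chosen only for illustration:

    # Illustrative sketch only; the import paths and the Link model are assumptions.
    from myapp.models import Link
    from votes.models import Vote

    # Top 5 highest-scored links, best first.
    for obj, score in Vote.objects.get_top(Link, limit=5):
        print(obj.pk, score)

    # Lowest-scored links instead.
    for obj, score in Vote.objects.get_top(Link, limit=5, inverted=True):
        print(obj.pk, score)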
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_top(self, Model, limit=10, reversed=False):\n ctype = ContentType.objects.get_for_model(Model)\n query = \"\"\"\n SELECT object_id, SUM(vote) as %s\n FROM %s\n WHERE content_type_id = %%s\n GROUP BY object_id\"\"\" % (\n connection.ops.quote_name('score'),\n connection.ops.quote_name(self.model._meta.db_table),\n )\n\n # MySQL has issues with re-using the aggregate function in the\n # HAVING clause, so we alias the score and use this alias for\n # its benefit.\n if settings.DATABASE_ENGINE == 'mysql':\n having_score = connection.ops.quote_name('score')\n else:\n having_score = 'SUM(vote)'\n if reversed:\n having_sql = ' HAVING %(having_score)s < 0 ORDER BY %(having_score)s ASC LIMIT %%s'\n else:\n having_sql = ' HAVING %(having_score)s > 0 ORDER BY %(having_score)s DESC LIMIT %%s'\n query += having_sql % {\n 'having_score': having_score,\n }\n\n cursor = connection.cursor()\n cursor.execute(query, [ctype.id, limit])\n results = cursor.fetchall()\n\n # Use in_bulk() to avoid O(limit) db hits.\n objects = Model.objects.in_bulk([id for id, score in results])\n\n # Yield each object, score pair. Because of the lazy nature of generic\n # relations, missing objects are silently ignored.\n for id, score in results:\n if id in objects:\n yield objects[id], int(score)", "def get_top(self, Model, limit=10, reversed=False):\r\n ctype = ContentType.objects.get_for_model(Model)\r\n query = \"\"\"\r\n SELECT object_id, SUM(vote) as %s\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n GROUP BY object_id\"\"\" % (\r\n qn('score'),\r\n qn(self.model._meta.db_table),\r\n )\r\n\r\n # MySQL has issues with re-using the aggregate function in the\r\n # HAVING clause, so we alias the score and use this alias for\r\n # its benefit.\r\n if settings.DATABASE_ENGINE == 'mysql':\r\n having_score = qn('score')\r\n else:\r\n having_score = 'SUM(vote)'\r\n if reversed:\r\n having_sql = ' HAVING %(having_score)s < 0 ORDER BY %(having_score)s ASC %(limit_offset)s'\r\n else:\r\n having_sql = ' HAVING %(having_score)s > 0 ORDER BY %(having_score)s DESC %(limit_offset)s'\r\n query += having_sql % {\r\n 'having_score': having_score,\r\n 'limit_offset': connection.ops.limit_offset_sql(limit),\r\n }\r\n\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id])\r\n results = cursor.fetchall()\r\n\r\n # Use in_bulk() to avoid O(limit) db hits.\r\n objects = Model.objects.in_bulk([id for id, score in results])\r\n\r\n # Yield each object, score pair. 
Because of the lazy nature of generic\r\n # relations, missing objects are silently ignored.\r\n for id, score in results:\r\n if id in objects:\r\n yield objects[id], int(score)", "def top_by_ratings(self, n, metric=average):\n return top_movies", "def top_by_num_of_ratings(self, n):\n return top_movies", "def top_boys(self):\n return [boy for boy in self._db.boys.find().sort('rating', pymongo.DESCENDING).limit(5)]", "def get_top_models(self, return_scores=True):\n self.greater_score_is_better = is_greater_better(self.scoring_function)\n model_names = list(set([key.split('(')[0] for key in\n self.evaluated_individuals_.keys()]))\n models = OrderedDict({model: [] for model in model_names})\n for k in self.evaluated_individuals_:\n models[k.split('(')[0]].append(self.evaluated_individuals_[k])\n for model_name in model_names:\n models[model_name]=sorted(models[model_name],\n key=lambda x: x['internal_cv_score'],\n reverse=self.greater_score_is_better)\n self.models = models\n top_models = {model: models[model][0] for model in models}\n self.top_models = OrderedDict(\n sorted(top_models.items(),\n key=lambda x:x[1]['internal_cv_score'],\n reverse=self.greater_score_is_better))\n scores = {model: self.top_models[model]['internal_cv_score']\\\n for model in self.top_models}\n self.top_models_scores = OrderedDict(sorted(\n scores.items(), key=lambda x: x[1],\n reverse=self.greater_score_is_better))\n if return_scores:\n return self.top_models_scores\n else:\n return self.top_models", "def top_students(grade_book, num_students=3):\n return sorted(grade_book, key=grade_book.get, reverse=True)[:num_students]", "def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top", "def top_n_scores(snack_data, percentage_data, n, snack_query, protein_query, carb_query, fat_query):\n\tstart_time = time.time()\n\n\t#Loop through the snacks in dictionary and compute the score for each one\n\tscores_list = []\n\t\n\tfor title, info in snack_data.items():\n\t\tscore = get_score(snack_data, percentage_data, title, snack_query, protein_query, carb_query, fat_query)\n\t\tscores_list.append((title, score))\n\n\tscores_list.sort(key=lambda tup: tup[1], reverse=True)\n\tprint(\"top_n_scores() time: --- %s seconds ---\" % (time.time() - start_time))\n\n\treturn scores_list[0:n]", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def top_girls(self):\n return [girl for girl in self._db.girls.find().sort('rating', pymongo.DESCENDING).limit(5)]", "def test_sort_more_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n e4 = Experience(rid=1, uid=22, experience=1839)\n e5 = Experience(rid=1, uid=2, experience=20)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.add(e4)\n db.session.add(e5)\n db.session.commit()\n list = top_n_in_order(1, 3)\n self.assertEqual([(22, 1839), (12, 1343), (3, 100)], list)", "def top_students(mongo_collection):\n students = mongo_collection.find()\n best_students = []\n for student in students:\n topics = student[\"topics\"]\n score = 0\n for topic in topics:\n score = score + topic[\"score\"]\n avg = score / len(topics)\n student[\"averageScore\"] = avg\n best_students.append(student)\n return 
sorted(best_students, key=lambda i: i[\"averageScore\"], reverse=True)", "def top_students(mongo_collection):\n all_items = mongo_collection.find({})\n for item in all_items:\n count = 0\n new_topics = item\n for sta in item.get(\"topics\"):\n count += sta.get(\"score\")\n averageScore = count/len(item.get(\"topics\"))\n\n myquery = {\"name\": item.get(\"name\")}\n newvalues = {\"$set\": {\"averageScore\": averageScore}}\n mongo_collection.update_many(myquery, newvalues)\n\n order = mongo_collection.find().sort(\"averageScore\", DESCENDING)\n\n return order", "def test_sort_fewer_than_n(self):\n e1 = Experience(rid=1, uid=3, experience=100)\n e2 = Experience(rid=1, uid=1, experience=89)\n e3 = Experience(rid=1, uid=12, experience=1343)\n db.session.add(e1)\n db.session.add(e2)\n db.session.add(e3)\n db.session.commit()\n list = top_n_in_order(1,5)\n self.assertEqual([(12, 1343), (3, 100), (1, 89)], list)", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n walk = self._data.first()\n for j in range(k):\n item = walk.element() # element of list is Item\n yield item._value # using the customized __iter__ method\n walk = self._data.after(walk)", "def top(self, k):\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n walk = self._data.first()\n for j in range(k):\n item = walk.element() # element of list is _Item\n yield item._value\n walk = self._data.after(walk)", "def get_topN_docs(click_model, queryID):\n # for every queryID, find the first top 10 relevant docs\n unordered_docs = [] # list of [ind, rank] for all relevant docs\n for ind in click_model[queryID]:\n document = click_model[queryID][ind]\n rank = document['rank']\n # moving the docID into the doc-dictionary, so I can shuffle documents and easily keep this info\n # a little hackey, but can be changed if needed later down the pipeline\n document['docID'] = ind\n if rank is not None:\n unordered_docs.append([ind, rank])\n # sorts unordered_docs by doc rankings\n ordered_docs = (sorted(unordered_docs, key=lambda docs: docs[1]))\n ordered_docIDs = list(zip(*ordered_docs))[0]\n\n ranked_docs = []\n for docID in ordered_docIDs:\n ranked_docs.append(click_model[queryID][docID])\n return ranked_docs", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]", "def cy_process_recommendations(entities, scores, n=10):\n r = c_funcs.cy_aggregate_scores(entities, scores, n)\n heapq.heapify(r)\n return {'result': [{\"item\": k, \"score\": v} for k, v in heapq.nlargest(\n n, r, key= lambda x: x[1])]}", "def top_controversial(self, n):\n return top_movies", "def query_top_buys(cls,N=20):\n # NOTE: this is not used currently\n gds = from_cache('VG_TOP_%d'%N)\n if not gds:\n gds = [str(g.id()) for g in SuiGoods.all(keys_only=True).order('-likes').fetch(N)]\n to_cache('VG_TOP_%d'%N,gds,3600*24)\n return SuiGoods.load_by_ids(gds)", "def top_items(self, n=10, filter=None):\n if n > len(self): n = len(self)\n order = np.argsort(self)\n if filter is None:\n indices = order[-1:-n-1:-1]\n return [(self.label(idx), self[idx]) for idx in indices]\n idx = -1\n results = []\n while len(results) != n and idx >= -len(order):\n where = order[idx]\n label = self.label(where)\n if filter(label):\n 
results.append((label, self[where]))\n idx -= 1\n return results", "def textrank(sentences, top_n, stopwords=None):\n S = build_similarity_matrix(sentences, stopwords) \n sentence_ranking = page_rank(S)\n \n # Sort the sentence ranks\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranking), key=lambda item: -item[1])]\n selected_sentences = sorted(ranked_sentence_indexes[:top_n])\n summary = itemgetter(*selected_sentences)(sentences)\n return summary", "def get_top_pages(model=None):\n return get_page_children(page=None, model=model)", "def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person != other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]", "def top_n(self, n: int = 10) -> dict:\n return self.members[:n]", "def top_n(self, n: int = 10) -> dict:\n return self.members[:n]", "def get_popularity_based_topk(self, top_k=10, sort_top_k=False):\n\n test_scores = np.array([self.item_frequencies])\n\n logger.info('Getting top K')\n top_items, top_scores = get_top_k_scored_items(\n scores=test_scores, top_k=top_k, sort_top_k=sort_top_k\n )\n\n return pd.DataFrame(\n {\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )" ]
[ "0.6620148", "0.6568486", "0.6029523", "0.5966627", "0.5958777", "0.588728", "0.57843107", "0.57750875", "0.5712884", "0.5682572", "0.5677662", "0.5639743", "0.5635089", "0.5624442", "0.5623601", "0.55735147", "0.5531635", "0.5519599", "0.55086815", "0.5506289", "0.5492053", "0.5393359", "0.53763264", "0.53093153", "0.5306699", "0.52754396", "0.5267347", "0.5247458", "0.5247458", "0.5222556" ]
0.7162024
0
Get the vote made on the given object by the given user, or ``None`` if no matching vote exists.
def get_for_user(self, obj, user):
    if not user.is_authenticated:
        return None
    content_object = ContentType.objects.get_for_model(obj)
    try:
        vote = self.get(voter=user, content_type=content_object, object_id=obj._get_pk_val())
    except ObjectDoesNotExist:
        # print('No vote by {user} on {object}'.format(user=user, object=obj))
        return None
    return vote
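A short, hedged sketch of how a view might consume this; the helper name, the `obj` and `request` parameters, and the import path are assumptions for illustration:

    # Illustrative sketch only; 'votes.models.Vote' is an assumed import path.
    from votes.models import Vote

    def current_vote_value(obj, request):
        existing_vote = Vote.objects.get_for_user(obj, request.user)
        if existing_vote is None:
            return 0  # anonymous user, or no vote recorded yet
        return existing_vote.vote  # e.g. +1 or -1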
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_for_user(self, obj, user):\n if not user.is_authenticated():\n return None\n ctype = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\n user=user)\n except models.ObjectDoesNotExist:\n vote = None\n return vote", "def get_for_user(self, obj, user):\r\n if not user.is_authenticated():\r\n return None\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\r\n user=user)\r\n except models.ObjectDoesNotExist:\r\n vote = None\r\n return vote", "def _user_vote(self, user):\n from . import Vote\n\n if not user.is_authenticated:\n return None\n\n return (\n Vote.query\n .filter(Vote.type == 'links')\n .filter(Vote.user_id == user.id)\n .filter(Vote.thing_id == self.id)\n .first()\n )", "def get_vote_value_for_object_parameter(obj, user, uuid = None, tpclass = None, name = None):\n t = type(obj)\n if t not in parameter_class_map:\n raise TypeError('type of the object must be model with parameters, not {0}'.format(t))\n\n valclass = parameter_class_map[t]['val']\n voteclass = parameter_class_map[t]['vote']\n q = Q(status='voted') & Q(parameter__obj=obj) & Q(**{'{0}__voter'.format(voteclass.__name__.lower()) : user})\n if isinstance(uuid, basestring):\n q &= Q(parameter__uuid = uuid)\n else:\n q &= Q(parameter__tpclass=tpclass)\n if tpclass == 'user':\n if not isinstance(name, basestring):\n raise Exception('name must be string if tpclass == \"user\"')\n q &= Q(parameter__name=name)\n try:\n ret = valclass.objects.filter(q).all()[0]\n except IndexError:\n return None\n return ret", "def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()", "def record_vote(self, obj, vote, user):\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n content_type = ContentType.objects.get_for_model(obj)\n # First, try to fetch the instance of this row from DB\n # If that does not exist, then it is the first time we're creating it\n # If it does, then just update the previous one\n try:\n vote_obj = self.get(voter=user, content_type=content_type, object_id=obj._get_pk_val())\n if vote == 0 and not ZERO_VOTES_ALLOWED:\n vote_obj.delete()\n else:\n vote_obj.vote = vote\n vote_obj.save()\n\n except ObjectDoesNotExist:\n #This is the first time we're creating it\n try:\n if not ZERO_VOTES_ALLOWED and vote == 0:\n # This shouldn't be happening actually\n return\n vote_obj = self.create(voter=user, content_type=content_type, object_id=obj._get_pk_val(), vote=vote)\n except:\n print(( '{file}: something went wrong in creating a vote object at {line}'.format(file=str('__FILE__'), line=str('__LINE__'))))\n raise ObjectDoesNotExist\n\n return vote_obj", "def get_object_with_user(self, user):\n try:\n uid = int(user)\n except TypeError:\n try:\n uid = user.id \n except:\n return None\n try:\n return self.get(db_player__user__id=uid)\n except Exception:\n return None", "def vote(request, model, object_id):\n if request.method != 'POST':\n raise Http404\n\n vote_type = request.POST.get('type', None)\n if vote_type == 'up' and auth.can_vote_up(request.user):\n vote_type = Vote.VOTE_UP\n elif vote_type == 'down' and auth.can_vote_down(request.user):\n vote_type = Vote.VOTE_DOWN\n else:\n raise Http404\n\n # TODO Ensure users can't vote on their own posts\n\n obj = get_object_or_404(model, id=object_id, deleted=False, locked=False)\n content_type = ContentType.objects.get_for_model(model)\n try:\n existing_vote = 
Vote.objects.get(content_type=content_type,\n object_id=object_id,\n user=request.user)\n except Vote.DoesNotExist:\n existing_vote = None\n\n if existing_vote is None:\n Vote.objects.create(content_type=content_type,\n object_id=object_id,\n user=request.user,\n vote=vote_type)\n else:\n if vote_type == existing_vote.vote:\n existing_vote.delete()\n else:\n existing_vote.vote = vote_type\n existing_vote.save()\n\n # TODO Reputation management\n\n if request.is_ajax():\n return JsonResponse({\n 'success': True,\n 'score': model._default_manager.filter(\n id=object_id).values_list('score', flat=True)[0],\n })\n else:\n return HttpResponseRedirect(obj.get_absolute_url())", "def get_vote(self, stats):\n if not isinstance(stats, Stats):\n raise TypeError\n return Vote.query.filter_by(user_id=self.id, stats_id=stats.id).first()", "def get_rating_for_user(self, user, ip_address=None, cookies={}):\n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n\n if not (user and user.is_authenticated()):\n if not ip_address:\n raise ValueError('``user`` or ``ip_address`` must be present.')\n kwargs['user__isnull'] = True\n kwargs['ip_address'] = ip_address\n else:\n kwargs['user'] = user\n \n use_cookies = (self.field.allow_anonymous and self.field.use_cookies)\n if use_cookies:\n # TODO: move 'vote-%d.%d.%s' to settings or something\n cookie_name = 'vote-%d.%d.%s' % (kwargs['content_type'].pk, kwargs['object_id'], kwargs['key'][:6],) # -> md5_hexdigest?\n cookie = cookies.get(cookie_name)\n if cookie: \n kwargs['cookie'] = cookie\n else:\n kwargs['cookie__isnull'] = True\n \n try:\n rating = Vote.objects.get(**kwargs)\n return rating.score\n except Vote.MultipleObjectsReturned:\n pass\n except Vote.DoesNotExist:\n pass\n return", "def vote_on_object(request, model, direction, post_vote_redirect=None,\r\n object_id=None, slug=None, slug_field=None, template_name=None,\r\n template_loader=loader, extra_context=None, context_processors=None,\r\n template_object_name='object', allow_xmlhttprequest=False):\r\n if (allow_xmlhttprequest and\r\n request.META.has_key('HTTP_X_REQUESTED_WITH') and\r\n request.META['HTTP_X_REQUESTED_WITH'] == 'XMLHttpRequest'):\r\n return xmlhttprequest_vote_on_object(request, model, direction,\r\n object_id=object_id, slug=slug,\r\n slug_field=slug_field)\r\n\r\n if extra_context is None: extra_context = {}\r\n if not request.user.is_authenticated():\r\n return redirect_to_login(request.path)\r\n\r\n try:\r\n vote = dict(VOTE_DIRECTIONS)[direction]\r\n except KeyError:\r\n raise AttributeError('\\'%s\\' is not a valid vote type.' % vote_type)\r\n\r\n # Look up the object to be voted on\r\n lookup_kwargs = {}\r\n if object_id:\r\n lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id\r\n elif slug and slug_field:\r\n lookup_kwargs['%s__exact' % slug_field] = slug\r\n else:\r\n raise AttributeError('Generic vote view must be called with either object_id slug/slug_field.')\r\n try:\r\n obj = model._default_manager.get(**lookup_kwargs)\r\n except ObjectDoesNotExist:\r\n raise Http404, 'No %s found for %s.' 
% (model._meta.app_label, lookup_kwargs)\r\n\r\n if request.method == 'POST':\r\n if post_vote_redirect is not None:\r\n next = post_vote_redirect\r\n elif request.REQUEST.has_key('next'):\r\n next = request.REQUEST['next']\r\n elif hasattr(obj, 'get_absolute_url'):\r\n if callable(getattr(obj, 'get_absolute_url')):\r\n next = obj.get_absolute_url()\r\n else:\r\n next = obj.get_absolute_url\r\n else:\r\n raise AttributeError('Generic vote view must be called with either post_vote_redirect, a \"next\" parameter in the request, or the object being voted on must define a get_absolute_url method or property.')\r\n Vote.objects.record_vote(obj, request.user, vote)\r\n return HttpResponseRedirect(next)\r\n else:\r\n if not template_name:\r\n template_name = '%s/%s_confirm_vote.html' % (model._meta.app_label, model._meta.object_name.lower())\r\n t = template_loader.get_template(template_name)\r\n c = RequestContext(request, {\r\n template_object_name: obj,\r\n 'direction': direction,\r\n }, context_processors)\r\n for key, value in extra_context.items():\r\n if callable(value):\r\n c[key] = value()\r\n else:\r\n c[key] = value\r\n response = HttpResponse(t.render(c))\r\n return response", "def record_vote_simple(self, obj, user, vote):#renamed from original record_vote\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n ctype = ContentType.objects.get_for_model(obj)\n try:\n v = self.get(user=user, content_type=ctype,\n object_id=obj._get_pk_val())\n if vote == 0:\n v.delete()\n else:\n v.vote = vote\n v.save()\n except models.ObjectDoesNotExist:\n if vote != 0:\n self.create(user=user, content_type=ctype,\n object_id=obj._get_pk_val(), vote=vote)", "def do_vote(self, stats, vote_val):\n vote = self.get_vote(stats)\n if vote is None:\n vote = Vote()\n vote.user = self\n vote.stats = stats\n vote.value = vote_val\n return vote", "def record_vote(self, obj, user, vote):\r\n if vote not in (+1, 0, -1):\r\n raise ValueError('Invalid vote (must be +1/0/-1)')\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n v = self.get(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val())\r\n if vote == 0:\r\n v.delete()\r\n else:\r\n v.vote = vote\r\n v.save()\r\n except models.ObjectDoesNotExist:\r\n if vote != 0:\r\n self.create(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val(), vote=vote)", "def find(self, user_id: UserId) -> Optional[U]:\n ...", "def get_user_by_id(user_id: int) -> User:\n session = Session()\n\n # verify user_id exists\n vote_user: User = session.query(User).filter(User.id == user_id).first()\n session.close()\n\n if not vote_user:\n raise UserNotFoundException\n\n return vote_user", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def get_object(self):\n try:\n self.object = User.objects.get(username= self.request.user)\n print(self.object)\n return self.object\n except:\n return None", "def get_object(self):\n return get_object_or_404(User, pk__iexact=self.request.user.id)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def get_object(self):\n return get_object_or_404(User, id__iexact=self.request.user.id)", "def review_vote_entity_handler(review_id, user):\n review = Review.query.get_or_404(str(review_id))\n vote = Vote.query.filter_by(user=user, review=review).first()\n if not vote:\n raise NotFound\n else:\n return jsonify(vote=vote.to_dict())", "def find_one(self, 
user_id):\n pass", "def do_votes_by_user(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly four arguments\" % bits[0])\r\n if bits[2] != 'on':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'on'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return VotesByUserNode(bits[1], bits[3], bits[5])", "def get_user_votes(user_id: int) -> int:\n session = Session()\n\n # get user by id to ensure user exists\n get_user_by_id(user_id)\n # count votes for the user that haven't expired\n user_votes: int = session.query(Vote)\\\n .filter(Vote.user_id == user_id)\\\n .filter(Vote.vote_expiry > datetime.datetime.now()).count()\n\n session.close()\n\n return user_votes", "def get_object(self, queryset=None):\n\t\tobj = self.request.user\n\t\treturn obj", "def get_vote(self, id: int) -> dict:", "def get_object(self, queryset=None):\n return self.request.user", "def do_vote_by_user(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly five arguments\" % bits[0])\r\n if bits[2] != 'on':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'on'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return VoteByUserNode(bits[1], bits[3], bits[5])", "def get_parameter_voter(obj, status, value, tpclass = None, name = None ,uuid = None):\n t = type(obj)\n if t not in parameter_class_map:\n raise TypeError(\"obj must be model object with parameters, not {0}\".format(t))\n vote = parameter_class_map[t]['vote']\n q = (Q(parameter_val__parameter__obj = obj)&\n Q(parameter_val__status = status)&\n Q(parameter_val__value = value))\n if uuid != None:\n q &= Q(parameter_val__parameter__uuid = uuid)\n else:\n if tpclass == 'user':\n if not isinstance(name, basestring):\n raise ValueError('You must specify `name` of parameter, if tpclass == \"user\"')\n elif not isinstance(tpclass, basestring):\n raise ValueError('You must specify `tpclass` if `uuid` is not specified')\n q &= Q(parameter_val__parameter__tpclass = tpclass)\n if name != None:\n q &= Q(parameter_val__parameter__name = name)\n\n ret = []\n for vt in vote.objects.filter(q).distinct().all():\n ret.append(vt.voter)\n return ret" ]
[ "0.83463514", "0.8317188", "0.75515354", "0.71140134", "0.6328592", "0.6223597", "0.6184133", "0.6064872", "0.5983577", "0.59810215", "0.59462565", "0.58576053", "0.58417827", "0.57884955", "0.5786087", "0.57498264", "0.5686267", "0.5686267", "0.5666457", "0.5636173", "0.5636173", "0.5598468", "0.5593741", "0.55630326", "0.55273944", "0.5526266", "0.55240256", "0.5466031", "0.5449262", "0.54444295" ]
0.8563331
0
Gets the number of upvotes made on the object by all users
def get_upvotes(self, obj):
    content_type = ContentType.objects.get_for_model(obj)
    votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(),
                        vote__exact=UPVOTE).aggregate(upvotes=Sum('vote'))
    if votes['upvotes'] is None:
        votes['upvotes'] = 0
    return votes['upvotes']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_upvotes(self):\n return self.filter(value=1).count()", "def get_total_upvotes(self, suggestions, main_suggestion):\n total_upvoted_users = main_suggestion.upvoted_users.all()\n for suggestion in suggestions.all():\n total_upvoted_users |= suggestion.upvoted_users.all()\n\n return total_upvoted_users.distinct()", "def upvote(self, request, pk=None):\n post = self.get_object()\n post.upvotes += 1\n post.save()\n serializer = self.get_serializer(post)\n return Response(serializer.data, status.HTTP_200_OK)", "def count_votes(self):\n return self.annotate(sum=Sum('value'))", "def get_interested_users(self, obj):\n return obj.interested_users.count()", "def count_downvotes(self):\n return self.filter(value=-1).count()", "def num_votes(self):\n return sum(self.votes_per_count)", "def nay_voter_cnt(self):\n\n return len(self._nay_voters())", "def up_vote(cls, user, message):\r\n pass", "def vote_count(self):\n return QuestionVotes.objects.filter(question=self).count()", "def get_user_votes(user_id: int) -> int:\n session = Session()\n\n # get user by id to ensure user exists\n get_user_by_id(user_id)\n # count votes for the user that haven't expired\n user_votes: int = session.query(Vote)\\\n .filter(Vote.user_id == user_id)\\\n .filter(Vote.vote_expiry > datetime.datetime.now()).count()\n\n session.close()\n\n return user_votes", "def present_voter_cnt(self):\n\n return len(self._present_voters())", "def up_vote(cls, user, message):\n pass", "def abstain_voter_cnt(self):\n\n return len(self._abstain_voters())", "def upvote(self):\n url = \"https://api.imgur.com/3/gallery/{0}/vote/up\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')", "def recalculate_popularity(self):\n self.voters = 0\n for x in self.votes:\n self.voters += 1\n if x.good:\n self.popularity += 1\n else:\n self.popularity -= 1", "def get_amount_users() -> User:\n return User.objects.all().count()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def get_counts(self, obj: User):\n uploader = obj.id\n public_count = Image.objects.filter(uploader=uploader, is_private=False, is_profile_image=False).count()\n private_count = Image.objects.filter(uploader=uploader, is_private=True, is_profile_image=False).count()\n liked_count = Image.objects.filter(likes__id=uploader).count()\n \n return {\n \"public\": public_count,\n \"private\": private_count,\n \"liked\": liked_count,\n }", "def get_vote_count(self, post):\n return post.vote_set.count()", "def yay_voter_cnt(self):\n\n return len(self._yay_voters())", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def get_downvotes(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n\n votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(), vote__exact=DOWNVOTE).aggregate(downvotes=Sum('vote'))\n\n if votes['downvotes'] is None:\n votes['downvotes'] = 0\n\n return -votes['downvotes']", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def get_score(self, obj):\n content_type = 
ContentType.objects.get_for_model(obj)\n result = self.filter(content_type=content_type,\n object_id=obj._get_pk_val()).aggregate(\n score=Sum('vote'),\n num_votes=Count('vote'))\n #It may happen that there has been no voting on this object so far.\n if result['score'] is None:\n result['score'] = 0\n\n result['upvotes'] = self.get_upvotes(obj)\n result['downvotes'] = self.get_downvotes(obj)\n\n return result", "def count_karma(user):\r\n\r\n karma = 0\r\n posts = Post.objects.filter(author=user).all()\r\n for post in posts:\r\n karma += (int(post.likes.count()) - int(post.dislikes.count()))\r\n \r\n return karma", "def oneup_count(self):\n return self.oneups.filter(Oneup.state >= 0).count()", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def get_counts(self, obj: User):\n uploader = obj.id\n public_count = Image.objects.filter(uploader=uploader, is_private=False, is_profile_image=False).count()\n \n return {\n \"public\": public_count,\n }", "def up_vote():\n review_id = review_id = request.form.get('review_id')\n\n mongo.db.reviews.update_one(\n {'_id': ObjectId(review_id)}, {\"$inc\": {\"up_vote\": 1}})\n\n up_vote = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)},\n {\"up_vote\": 1, \"_id\": 0})\n\n return jsonify({\"up_vote\": up_vote['up_vote'], \"success\": True})" ]
[ "0.8053709", "0.7087659", "0.6881225", "0.6833426", "0.6637999", "0.6490542", "0.6454659", "0.63448167", "0.63270116", "0.631257", "0.62989783", "0.6298624", "0.62497115", "0.6242473", "0.62353253", "0.6199192", "0.61926293", "0.61874974", "0.61717516", "0.6157083", "0.6111635", "0.6098047", "0.60496587", "0.6028986", "0.59865016", "0.59849185", "0.5975095", "0.59709096", "0.59574586", "0.59492177" ]
0.80163234
1
Gets the number of downvotes on the object by all users
def get_downvotes(self, obj):
    content_type = ContentType.objects.get_for_model(obj)
    votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(),
                        vote__exact=DOWNVOTE).aggregate(downvotes=Sum('vote'))
    if votes['downvotes'] is None:
        votes['downvotes'] = 0
    return -votes['downvotes']
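Paired with `get_upvotes` above, a small illustrative helper (the `vote_summary` name, the `obj` argument, and the import path are assumptions) shows how the two counts are typically combined; note that `get_downvotes` already returns a non-negative count:

    # Illustrative sketch only; 'votes.models.Vote' is an assumed import path.
    from votes.models import Vote

    def vote_summary(obj):
        up = Vote.objects.get_upvotes(obj)
        down = Vote.objects.get_downvotes(obj)
        return {'upvotes': up, 'downvotes': down, 'score': up - down}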
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_downvotes(self):\n return self.filter(value=-1).count()", "def count_upvotes(self):\n return self.filter(value=1).count()", "def get_upvotes(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n\n votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(), vote__exact=UPVOTE).aggregate(upvotes=Sum('vote'))\n\n if votes['upvotes'] is None:\n votes['upvotes'] = 0\n\n return votes['upvotes']", "def get_user_votes(user_id: int) -> int:\n session = Session()\n\n # get user by id to ensure user exists\n get_user_by_id(user_id)\n # count votes for the user that haven't expired\n user_votes: int = session.query(Vote)\\\n .filter(Vote.user_id == user_id)\\\n .filter(Vote.vote_expiry > datetime.datetime.now()).count()\n\n session.close()\n\n return user_votes", "def count_votes(self):\n return self.annotate(sum=Sum('value'))", "def negative_votes(self):\n return self._get(\"negative_votes\")", "def get_number_of_non_exhausted_votes(self):\n return (\n len(self._ballots) * self._number_of_votes_pr_voter\n - self._number_of_blank_votes\n )", "def nay_voter_cnt(self):\n\n return len(self._nay_voters())", "def count_of_downgrades(self) -> Optional[float]:\n return pulumi.get(self, \"count_of_downgrades\")", "def get_total_upvotes(self, suggestions, main_suggestion):\n total_upvoted_users = main_suggestion.upvoted_users.all()\n for suggestion in suggestions.all():\n total_upvoted_users |= suggestion.upvoted_users.all()\n\n return total_upvoted_users.distinct()", "def yay_voter_cnt(self):\n\n return len(self._yay_voters())", "def get_voters():", "def get_voters():", "def num_votes(self):\n return sum(self.votes_per_count)", "def downvote(self):\n url = \"https://api.imgur.com/3/gallery/{0}/vote/down\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')", "def downvote(self) -> Response:\n self.force_authenticate_user()\n response = self.downvote_question()\n return response", "def _vote(self, neighbor_labels):\n counts= torch.bincount(neighbor_labels.int())\n return torch.argmax(counts)", "def calculate_vote_fractions():\n return _calculate_vote_fractions(models.get_candidate_to_vote_count())", "def down_vote():\n review_id = request.form.get('review_id')\n\n mongo.db.reviews.update_one(\n {'_id': ObjectId(review_id)}, {\"$inc\": {\"down_vote\": 1}})\n\n down_vote = mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)},\n {\"down_vote\": 1, \"_id\": 0})\n\n return jsonify({\"down_vote\": down_vote['down_vote'], \"success\": True})", "def get_interested_users(self, obj):\n return obj.interested_users.count()", "def get_vote_count(self, post):\n return post.vote_set.count()", "def get_all_votes(self) -> List[dict]:", "def get_amount_users() -> User:\n return User.objects.all().count()", "def get_vote_tally(self):\r\n voters = []\r\n tally = {}\r\n for b in reversed(self.blocks):\r\n if b.user_id not in voters and type(b) == VoteBlock:\r\n voters.append(b.user_id)\r\n if b.choice in tally.keys():\r\n tally[b.choice] += 1\r\n else:\r\n tally[b.choice] = 1\r\n result = []\r\n for key in tally:\r\n d = {}\r\n d['name'] = key\r\n d['count'] = tally[key]\r\n result.append(d)\r\n return result", "def dislikes(self):\n return self.get_queryset().filter(vote__lt=0)", "def downvote_handler(request, id: str) -> JsonResponse:\n\n # validate that required fields are present\n data: dict = json.loads(request.body)\n for field in [\"topic\", \"additional_downvotes\"]:\n if data.get(field) is None:\n return JsonResponse({\"error\": 
f\"missing JSON field '{field}'\"},\n status=HTTPStatus.BAD_REQUEST)\n\n # Increment the upvotes in\n # 1) Mongo\n # 2) ElasticSearch\n new_downvote_count = downvote_document(\n topic=data[\"topic\"],\n additional_downvotes=int(data[\"additional_downvotes\"]),\n id=id\n )\n \n return JsonResponse(\n {\"id\": id, \"topic\": data[\"topic\"], \"new_downvote_count\": new_downvote_count}, \n status=status.HTTP_200_OK\n )", "def count_of_upgrades_after_downgrades(self) -> Optional[float]:\n return pulumi.get(self, \"count_of_upgrades_after_downgrades\")", "def count_votes(self, neighbours=()):\n labels = []\n data = neighbours\n # create the list made up of labels.\n for x in range(len(data)):\n labels.append(data[x][-1])\n\n # count the appearance of labels.\n count = [[x, labels.count(x)] for x in set(labels)]\n # Sort the labels in descending order by using their frequency\n vote = sorted(count, key=itemgetter(-1), reverse=True)\n # return the prediction\n # print(\"[{}]\".format(vote[0][0]))\n return vote[0][0]", "def count_karma(user):\r\n\r\n karma = 0\r\n posts = Post.objects.filter(author=user).all()\r\n for post in posts:\r\n karma += (int(post.likes.count()) - int(post.dislikes.count()))\r\n \r\n return karma", "def total_pulls(self) -> int:\n return self.__total_pulls" ]
[ "0.762536", "0.6708461", "0.6653797", "0.6466711", "0.6283814", "0.59941554", "0.5965759", "0.5957265", "0.59383214", "0.58846676", "0.58008784", "0.5792616", "0.5792616", "0.5763257", "0.5761557", "0.5698882", "0.5678017", "0.5666268", "0.5663854", "0.56521255", "0.5651946", "0.563916", "0.5625435", "0.56178844", "0.55763364", "0.5558333", "0.5554161", "0.55436397", "0.55433613", "0.5526693" ]
0.7843331
0
Append additional fields to the self.list_display.
def get_list_display(self, request):
    list_display = self.list_display
    if 'admin_created' not in list_display:
        list_display += ('admin_created', )
    if 'admin_modified' not in list_display:
        list_display += ('admin_modified', )
    return list_display
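A brief sketch of how such a `get_list_display` override is usually consumed, assuming it is defined on a mixin here called `AuditAdminMixin`; the mixin name, `Article`, and the `myapp` import paths are hypothetical:

    # Illustrative sketch only.
    from django.contrib import admin
    from myapp.admin_mixins import AuditAdminMixin
    from myapp.models import Article

    @admin.register(Article)
    class ArticleAdmin(AuditAdminMixin, admin.ModelAdmin):
        list_display = ('title', 'author')
        # The change list then renders: title, author, admin_created, admin_modified.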
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_list_display(self, request):\n delete = partial(self.remove, request=request)\n delete.short_description = \"\"\n delete.allow_tags = True\n list_display = list(self.list_display)\n for index, field_name in enumerate(list_display):\n field = getattr(self.model, field_name, None)\n if hasattr(field, \"related\"):\n list_display.remove(field_name)\n list_display.insert(\n index, self.display_add_link(request, field.related))\n list_display.append(delete)\n return list_display", "def appendedEntries(self):\n self.contact_list.append({\"name\": self.first_name.title() + \" \" + self.last_name.title(), \"phone number\": self.phone_number, \"phone number type\": self.phone_number_type})", "def get_list_display(self, request):\n list_display = []\n for field_name in self.list_display:\n try:\n db_field = self.model._meta.get_field(field_name)\n if isinstance(db_field, BooleanField):\n field_name = boolean_switch_field(db_field)\n except FieldDoesNotExist:\n pass\n list_display.append(field_name)\n return list_display", "def add_fields(self, *fields: Field):\n self.fields.extend(fields)", "def _append_customfield_fields(self):\n for customfield in self._get_custom_fields(self._get_content_type()):\n if customfield.ui_visibility == CustomFieldVisibilityChoices.VISIBILITY_HIDDEN:\n continue\n\n field_name = f'cf_{customfield.name}'\n self.fields[field_name] = self._get_form_field(customfield)\n\n # Annotate the field in the list of CustomField form fields\n self.custom_fields[field_name] = customfield\n if customfield.group_name not in self.custom_field_groups:\n self.custom_field_groups[customfield.group_name] = []\n self.custom_field_groups[customfield.group_name].append(field_name)", "def display_fields(self):\r\n\r\n field_text = self.show_fields()\r\n field_text_list = field_text.split(EOL)[0:-1]\r\n\r\n def fld_format (x_temp):\r\n\r\n x_temp = x_temp.split(COLON)[0], x_temp.split(COLON)[1]\r\n\r\n \"\"\"formats output of the list of search results\"\"\"\r\n\r\n if not isinstance(x_temp[1],str):\r\n shown_indexes = rangelist.range_find([int(Index(a_temp))\r\n for a_temp in x_temp[1]],reduce=True)\r\n else:\r\n shown_indexes = x_temp[1]\r\n\r\n if len(shown_indexes) < 20:\r\n return (abridge(x_temp[0]).replace(VERTLINE,SLASH)\r\n +VERTLINE\r\n +shown_indexes)\r\n\r\n\r\n returnlist = []\r\n sp_temp = rangelist.split_up_range(shown_indexes)\r\n\r\n\r\n returnlist.append(x_temp[0].replace(VERTLINE,SLASH)[0:min([60,len(x_temp[0])])]\r\n +VERTLINE+sp_temp[0])\r\n for s_temp in sp_temp[1:]:\r\n returnlist.append(VERTLINE+s_temp)\r\n\r\n return returnlist\r\n\r\n show_list(field_text_list,\r\n alerts.FIELDS[3:],0,40,\r\n func=fld_format,\r\n present=True,\r\n display=display)", "def add_fields(self, fields):\n for label, data in fields.items():\n self[label] = data", "def additional_fields(self, additional_fields):\n\n self._additional_fields = additional_fields", "def list(self,**kwargs):\n # import pdb;pdb.set_trace()\n g.title = \"{} Record List\".format(g.title)\n \n self.select_recs(**kwargs)\n \n # ensure that the field list is complete\n self.has_search_fields = False #default state\n self.set_list_fields(self.list_fields)\n \n if self._ajax_request:\n self.list_template = self.list_table_template\n \n return render_template(self.list_template,\n data = self,\n session_fields = ListFilter(), # provides the session field constants\n **kwargs,\n )", "def repopulate(self):\n new_items = self._list_populate_function()\n\n new_set = set(new_items.values() if isinstance(new_items, dict) 
else new_items)\n\n if len(new_items) != len(self._display_list):\n if isinstance(new_items, dict):\n # for dictionaries store the key as user role data\n for key in sorted(new_items.keys()):\n item = new_items[key]\n if item not in self._display_list:\n self.list_widget.addItem(item)\n self.list_widget.item(self.list_widget.count() - 1).setData(Qt.UserRole, key)\n else:\n for item in new_items:\n if item not in self._display_list:\n self._add_item(item)\n self._display_list = sorted(set(new_set) | set(self._display_list))", "def _add_fields(self, fields):\n for field in fields:\n self.add(field)", "def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields", "def add_field(self, **kwargs):\n field = {\n 'name': kwargs.get('name'),\n 'value': kwargs.get('value'),\n 'inline': kwargs.get('inline', False)\n }\n\n self.fields.append(field)", "def __str__(self):\n #Format data from default fields\n template = \"{number:4}|{rep:4}|{time:5}{priority:+2}|\" \\\n \"{record_type:8}|{name:17}\"\n default_fields = template.format(**self)\n \n #Format data from custom fields\n custom_field_list = []\n for label in self.custom_labels:\n custom_field_list.append(\"|{:17}:{!s:<5}\".format(label,\n self[label]))\n custom_fields = \"\".join(custom_field_list)\n \n return default_fields + custom_fields", "def __init__(self, *args, **kwargs):\n super(ListFieldType, self).__init__(*args, **kwargs)\n\n self.item_info = self.field_info.get('items')", "def get_list_display(self, *args, **kwargs):\n list_display = super(\n BitemporalModelAdmin, self).get_list_display(*args, **kwargs)\n return list(list_display) + [\n 'valid_datetime_start', 'valid_datetime_end',\n 'transaction_datetime_start', 'transaction_datetime_end']", "def add_list(self):\n the_list = models.List(user_id=1,\n list_name=self.test_list,\n description=self.test_list_desc)\n the_list.add()", "def appendAndOwn(self, *args):\n return _libsbml.ListOf_appendAndOwn(self, *args)", "def _set_default_list_fields(self,include_all=False):\n default_list_fields = []\n col_num = -1\n max_cols = 5 if not include_all else 99999\n \n for col in self.table.get_column_names():\n if len(default_list_fields) > max_cols:\n break\n \n if col[-3:].lower() == '_id' and not include_all:\n # foreign key\n continue\n \n col_num += 1\n \n default_list_fields.append({\n 'name':'{}'.format(col),\n # limit the number of visible fields on small screen\n 'class':'{}'.format('w3-hide-small' if len(default_list_fields) == 0 or len(default_list_fields) > 3 else ''), \n })\n \n return default_list_fields", "def add_field(self, field, field_data):\n self.extra_fields[field] = field_data", "def listMetaDataFields(self, exclude=True):\n #tool = getToolByName(self, ATCT_TOOLNAME)\n #original_list = tool.getMetadataDisplay(exclude)\n\n return DisplayList((\n ('getAnalysisCategory', _p('Analysis Category')),\n ('getAnalysisService', _p('Analysis Service')),\n ('getAnalysts', _('Analyst')),\n ('getClientOrderNumber', _('Client Order')),\n ('getClientReference', _('Client Reference')),\n ('getClientSampleID', _('Client Sample ID')),\n ('getClientTitle', _('Client')),\n ('getContactTitle', _('Contact')),\n ('Creator', _p('Creator')),\n ('created', _('Date Created')),\n ('getDatePublished', _('Date Published')),\n ('getDateReceived', _('Date Received')),\n ('getDateSampled', _('Date Sampled')),\n 
('getProfilesTitle', _('Analysis Profiles')),\n ('getRequestID', _('Request ID')),\n ('getSampleID', _('Sample ID')),\n ('getSamplePointTitle', _('Sample Point')),\n ('getSampleTypeTitle', _('Sample Type')),\n ('review_state', _p('Review state')),\n ))", "def get_columns(self, request, cl):\n columns = []\n for field_name in cl.model_admin.list_display:\n text, _ = label_for_field(field_name, cl.model, model_admin=cl.model_admin, return_attr=True)\n columns.append({field_name: text})\n return columns", "def misclist_build(self):\n\n self.MiscList.ClearAll()\n\n # Add column headers if necessary.\n if self.MiscList.GetColumnCount() == 0:\n self.MiscList.InsertColumn(0, 'Name', width=76)\n self.MiscList.InsertColumn(1, 'Value', width=67)\n\n misc_values = list(self.patch.engine.misc_data.values())\n for misc_index in range(len(misc_values)):\n misc_value = misc_values[misc_index]\n\n self.MiscList.InsertItem(misc_index, misc_value['name'])\n\n self.misclist_update_row(misc_index)\n\n self.list_autosize(self.MiscList)\n self.MiscList.Select(0, True)", "def __init__(self, *args, **kwargs):\n super(LockingAdminMixin, self).__init__(*args, **kwargs)\n if 'is_locked' not in self.list_display:\n if hasattr(self.list_display, 'append'):\n self.list_display.append('is_locked', )\n else:\n self.list_display = self.list_display + ('is_locked', )\n\n opts = self.model._meta\n self._model_info = (opts.app_label, opts.model_name)", "def changelist_view(self, request, extra_context=None):\n if request.user.has_perm('deflect.list_all'):\n self.list_filter = self._list_filter + ('creator__username',)\n self.list_display = self._list_display + ('creator',)\n else:\n self.list_filter = self._list_filter\n self.list_display = self._list_display\n return super(ShortURLAdmin, self).changelist_view(request, extra_context=extra_context)", "def get_extra_fields(self, model, fields):\n extra_fields = super(HistoricalRecords, self).get_extra_fields(\n model, fields)\n related_name = 'historical_' + model._meta.verbose_name_plural.lower()\n extra_fields['history_changeset'] = models.ForeignKey(\n 'Changeset', related_name=related_name)\n return extra_fields", "def admin(cls, fields=(), exclude=(), list_display=()):\n list_display = ('id','title','get_description','url')\n if hasattr(cls, 'list_display'):\n list_display = getattr(cls, 'list_display')\n else:\n list_display = ('id','title','get_description','url')\n if hasattr(cls, 'fields'):\n fields += tuple([field.name for field in cls._meta.fields \\\n if isinstance(field, (models.CharField, models.TextField))])\n #fields += getattr(cls, 'fields')\n if hasattr(cls, 'exclude'):\n exclude += getattr(cls, 'exclude')\n return type(\"%sAdmin\" % cls.__name__, (CoreDataModelAdmin,), dict(\n fields=fields, exclude=exclude,\n list_display=list_display,\n list_display_links=('id',)))", "def build_list_field(self, field_name: str, field: dict):\n field_layout = [sg.Text(self.build_label_text(field_name, field), size=(15, 1)),\n sg.Listbox(field.get(\"options\"), default_values=field.get(\"default\"), size=(20, 4),\n enable_events=False, key=field_name)]\n\n return field_layout", "def __str_additional_info_nvps__(self):\n return [\n ('order_update', str(self.order_update)),\n ('nickname', repr(self.nickname)),\n ('item', str(self.item))\n ]", "def __append_to_item_list(self):\n Item.get_item_list().append(self)" ]
[ "0.6392549", "0.6073667", "0.6061075", "0.59967446", "0.5952772", "0.587858", "0.57881814", "0.57162535", "0.5623907", "0.55740386", "0.556877", "0.5475366", "0.54707587", "0.5459969", "0.54526514", "0.54518634", "0.54348624", "0.5419972", "0.5400993", "0.5349254", "0.53316104", "0.5329953", "0.53107214", "0.5302106", "0.5292066", "0.5272656", "0.5260095", "0.5250068", "0.5243122", "0.52419525" ]
0.6505207
0
Ask the user if they want to reboot, and use an ad-hoc reboot command
def choose_reboot(): while True: choice = input("Would you like to reboot now ? [y\\N] ") if choice.lower() == 'n' or choice == '': return elif choice.lower() == 'y': break else: continue if os.name == 'nt': call('shutdown /r /t 00') else: call('reboot')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_reboot(self):\n confirmation = input(\"Do you want to reboot the camera? (Y or N)\\n\")\n if confirmation in ('Y', 'y'):\n return self.mycam.devicemgmt.SystemReboot()\n return None", "def reboot(self,request):\n\t\tresult = True\n\t\tPopen(['/sbin/reboot']) # that's all\n\t\tself.finished(request.id,result)", "def reboot(self):\n module = 'reboot'\n method = 'POST'\n print(self.device + ' Calling reboot command on the device')\n response = self.axapi_call(module, method,'')\n if '2' in str(response.status_code):\n print(self.device + ' Reboot command successfully received, device will reboot momentarily, please wait')\n else:\n print(self.device + ' There was an error in issuing the reboot command, device may not have rebooted, please verify manually')", "async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))", "def reboot():\n if not required():\n return \"Kernel reboot not required\"\n cmd_str = 'shutdown -r +1 \"Server is going down for kernel upgrade\"'\n Popen([cmd_str], shell=True, stdin=None,\n stdout=None, stderr=None, close_fds=True)\n return cmd_str", "def external_reboot(info):\n\n text, reason = info\n SysTools.reboot(reason)", "def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"", "def reboot(host=None):\r\n if host:\r\n host.reboot()", "def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def IssueReboot():\n if sys.platform.startswith('win'):\n subprocess.call(['shutdown', '-r', '-f', '-t', '1'])\n elif sys.platform in ('darwin', 'posix', 'linux2'):\n subprocess.call(['sudo', 'shutdown', '-r', 'now'])\n else:\n raise NotImplementedError('Implement IssueReboot function '\n 'for %s' % sys.platform)", "async def reboot(self) -> None:\n await self._api.call('system', 'reboot')", "def reboot(pi):\n command = \"ssh {0} 'sudo reboot'\".format(pi)\n subprocess.Popen(command, shell=True)\n print(\"Rebooting {0}\".format(pi))", "def reboot(self, client, sec):\r\n result = client.reboot(sec)\r\n return result", "def reboot_trima(runner):\r\n runner.AddCommand(\"reboot\\n\",'',False)\r\n runner.Run()\r\n runner.ResetCommands()", "def reboot(self):\n self.check_state('reboot')\n try:\n self.newportxps.reboot(reconnect=False, timeout=120.0)\n except Exception:\n pass", "def reboot(self):\n self.resetStream()\n logger.info(\"Going to reboot %s\" % self)\n self.setMode(CLI_MODES.shell)\n self._session.sendline(\"reboot\")\n reboot_failed_tries = 3\n reboot_wait_tries = 3\n while True:\n i = self._session.expect([\n \"The system is going down for reboot\",\n \"System shutdown initiated\",\n \"Connection to [\\.\\d]* closed\",\n pexpect.EOF,\n \"Request failed\",\n pexpect.TIMEOUT,\n ], timeout=120)\n if i == 0 or i == 1:\n logger.info(\"Reboot initiated\")\n 
continue\n elif i == 2 or i == 3:\n logger.info(\"Machine Rebooted. Connection closed\")\n break\n elif i == 4:\n if reboot_failed_tries > 0:\n logger.info(\"Reboot failed. Trying again...\")\n self._session.sendline(\"reload force\")\n reboot_failed_tries -= 1\n continue\n elif i == 5:\n if reboot_wait_tries > 0:\n logger.warn(\"Waited for 120 secs, but machine did NOT reboot. Waiting for sometime more...\")\n self._session.sendline(\"reload force\")\n reboot_wait_tries -= 1\n continue\n else:\n logger.error(\"Machine did NOT reboot!!!\")\n return False\n # break to prevent infinite loop\n break\n\n self._session.logfile_read.flush()\n self._session.logfile_read = None\n sys.stdout.flush()\n self.disconnect()\n logger.debug(\"Waiting for 300secs..\")\n\n time.sleep(300)\n return self.waitTillReachable(180, timeout=1800)", "def reboot(self, *args, **kwargs):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Attempting to reset the Treerunner board\"\n \"\".format(log_tag))\n cmd = \"shutdown > /dev/null 2>&1\"\n self.exec_command_ssh(cmd, background=True)\n self.logger.info(\"{} Waiting for the Treerunner board to come\"\n \" back online\".format(log_tag))\n time.sleep(30)\n # Start the sshd server daemon\n self.start_sshd_server()", "def reboot(self):\n raise NotImplementedError", "def ReallyReboot():\n Log('Reboot: Starting system reboot cycle')\n UpdateSignals()\n i = 0\n try:\n while True:\n Log('Reboot: Reboot cycle %d' % i)\n IssueReboot()\n Sleep(60)\n i += 1\n except:\n Log('Reboot: failed to issue a reboot: %s' % str(sys.exc_info()[0]))\n raise", "def reboot(self, wait_for_reload=False, **kwargs):\n if kwargs.get(\"confirm\"):\n log.warning(\"Passing 'confirm' to reboot method is deprecated.\")\n\n try:\n first_response = self.show(\"reload\")\n\n if \"System configuration\" in first_response:\n self.native.send_command_timing(\"no\")\n\n try:\n self.native.send_command_timing(\"\\n\", read_timeout=10)\n except ReadTimeout as expected_exception:\n log.info(\"Host %s: Device rebooted.\", self.host)\n log.info(\"Hit expected exception during reload: %s\", expected_exception.__class__)\n if wait_for_reload:\n time.sleep(10)\n self._wait_for_device_reboot()\n except Exception as err:\n log.error(err)\n log.error(err.__class__)", "def reboot(self, node):", "def reboot(self):\n self.gripper_io.set_signal_value(\"reboot\", True)", "def reboot(self, save_before_reboot=False) -> tuple[int, AnyStr]:\n return 5, gettext(\"Reboot not ready\")", "def reboot(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.reboot_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True", "def reboot():\n sudo('/mnt/apps/bin/restart-all-apache.sh')", "def reboot(miner: Miner, login):\n connection = Ssh(miner.ipaddress, login.username, login.password, port=getportfromminer(miner))\n connection.open_shell()\n response = connection.exec_command('/sbin/reboot')\n print_connection_data(connection)\n connection.close_connection()\n return response", "def supports_reboot(self):\n self.__not_implemented()", "def reboot(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n f.write(\"REBOOT\")\n log.info(\"Reboot ...\")", "async def reboot(self, ctx: Message):\n\t\tif ctx.author.id == ownerid:\n\t\t\towner_check = True\n\t\telse:\n\t\t\towner_check = False\n\n\t\tif owner_check == True:\n\t\t\tawait self.send(\"Rebooting the bot, I'll be back in 5 
seconds!\")\n\t\t\tawait self.close()\n\t\t\tawait asyncio.sleep(4)\n\t\t\tclient = DogeClient(DOGETOKEN,\n\t\t\t DOGEREFRESHTOKEN,\n\t\t\t prefix=\"d!\",\n\t\t\t reconnect_voice=True)\n\t\t\tclient.run()\n\t\t\tawait asyncio.sleep(1)\n\t\t\tawait self.send(\"I'm back online\")\n\t\telse:\n\t\t\treturn await self.send(\n\t\t\t f\"{ctx.author.mention} You are not the owner of the bot so you may not reboot it!\"\n\t\t\t)", "def doReboot(self):\n logging.info(\"%s doReboot\", ModuleName)\n if CB_CELLULAR_BRIDGE:\n try:\n Popen([\"/usr/bin/modem3g/sakis3g\", \"--sudo\", \"disconnect\"])\n except Exception as ex:\n logging.warning(\"%s deReboot. sakis3g disconnect failed\", ModuleName)\n logging.warning(\"%s Exception: %s %s\", ModuleName, type(ex), str(ex.args))\n try:\n self.cbSendManagerMsg({\"msg\": \"stopall\"})\n except Exception as ex:\n logging.warning(\"%s Cannot tell manager to stop, just rebooting\", ModuleName)\n logging.warning(\"%s Exception: %s %s\", ModuleName, type(ex), str(ex.args))\n # Tidy up\n #self.mgrPort.stopListening()\n reactor.callLater(REBOOT_WAIT, self.reboot)" ]
[ "0.7725278", "0.7503559", "0.7453776", "0.73682946", "0.7227929", "0.7211871", "0.7188908", "0.717906", "0.7136062", "0.7126615", "0.69618595", "0.6878219", "0.683283", "0.6796591", "0.67501", "0.67242324", "0.66379255", "0.66211015", "0.66104424", "0.6596329", "0.645953", "0.6398451", "0.63884234", "0.63876474", "0.6371085", "0.633895", "0.630595", "0.6288022", "0.62868756", "0.6283476" ]
0.80323696
0
Returns a list of 25 random tweets from the authenticated user's lists.
def grab_tweets(): tweets = [] long_tweets = [] for each in lists: tweets = tweets + twitter.GetListTimeline(list_id=each.id, count=count, include_rts=True) for tweet in tweets: if len(tweet.text) >= min_tweet_len: long_tweets.append(tweet) shuffle(long_tweets) if len(long_tweets) >= num_tweets: return long_tweets[:num_tweets] else: return long_tweets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def get_random_tweets(n):\r\n sample = list(mongo_coll_tweets.aggregate([{'$sample': {'size': n}}]))\r\n\r\n return sample", "def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets", "def get_tweets(self):\r\n now = datetime.datetime.now()\r\n tweet_json = self.api.get_tweets(self.last, now)\r\n self.last = now\r\n return [Tweet(x) for x in tweet_json]", "def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. 
\\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def get_five_random(self):\r\n if self.get_length() > 5:\r\n random_selection = []\r\n\r\n from random import randrange\r\n\r\n for i in range(0, 5):\r\n while True:\r\n rnd = randrange(0, self.get_length())\r\n if self.get_tweet(rnd) not in random_selection:\r\n random_selection.append(self.get_tweet(rnd))\r\n break\r\n return random_selection\r\n else:\r\n return self.tweets", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. 
{len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def get_tweets(api, listOfTweets, keyword, numOfTweets=20, date_since='2019-1-1', lang=\"en\"):\n spinner = yaspin()\n spinner.start()\n for tweet in tweepy.Cursor(api.search, q=keyword, lang=lang, since=date_since).items(numOfTweets):\n # Add tweets in this format\n dict_ = {'Screen Name': tweet.user.screen_name,\n 'User Name': tweet.user.name,\n 'Tweet Created At': str(tweet.created_at),\n 'Tweet Text': tweet.text,\n 'Cleaned Tweet Text': func.clean_tweets(tweet.text),\n 'User Location': str(tweet.user.location),\n 'Tweet Coordinates': str(tweet.coordinates),\n 'Retweet Count': str(tweet.retweet_count),\n 'Retweeted': str(tweet.retweeted),\n 'Phone Type': str(tweet.source),\n 'Favorite Count': str(tweet.favorite_count),\n 'Favorited': str(tweet.favorited),\n 'Replied': str(tweet.in_reply_to_status_id_str)\n }\n listOfTweets.append(dict_)\n spinner.stop()\n return listOfTweets", "def ajax_get_random_tweets(n):\r\n return dumps(get_random_tweets(int(n)))", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1", "def get_tweet_list(user_handle):\n client = language.LanguageServiceClient()\n\n tweet_list = twitter.get_tweets(handle=user_handle)\n\n if tweet_list[0] == \"34\":\n return tweet_list\n\n for i in range(len(tweet_list)):\n\n content = tweet_list[i].get(\"text\")\n\n document = types.Document(\n content=content, type=enums.Document.Type.PLAIN_TEXT)\n annotations = client.analyze_sentiment(document=document)\n\n # Print the 
results\n # print_result(annotations)\n\n score = annotations.document_sentiment.score\n magnitude = annotations.document_sentiment.magnitude\n\n tweet_list[i][\"score\"] = score\n tweet_list[i][\"magnitude\"] = magnitude\n\n # print(tweet_list[i])\n\n return tweet_list", "def get_tweets(self):\r\n return self.tweets", "def get_random_tweets(sqlite_db, twt_tbl, auth_tbl, auth_id, num_req, rnd_seed):\n conn = sqlite3.connect(sqlite_db)\n c = conn.cursor()\n # get the number of tweets available for a given author and select threshold + 1 for experiments\n # get number of tweets\n num_twts = get_num_tweets(sqlite_db, auth_tbl, auth_id)\n # print(num_twts)\n # random seed for reproducing experimental results\n random.seed(rnd_seed)\n # list of message id's to use in testing\n message_list = random.sample(range(1, num_twts), num_req)\n print(message_list)\n # build the sql statement\n param = '?'\n params = ','.join(param*len(message_list))\n sql = \"SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID='{a_id}' AND MESSAGE_NUM IN ({prms})\".\\\n format(tn=twt_tbl, a_id=auth_id, prms=params)\n print(sql)\n # c.execute('SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID=\"{a_id}\" AND MESSAGE_NUM IN \"{m_lst}\"'. \\\n # format(tn=twt_tbl, a_id=auth_id), m_lst=','.join(['?']*len(message_list)))\n c.execute(sql,message_list)\n conn.commit()\n twts = c.fetchall()\n # printing the tweets to validate selection\n # for tweet_tup in twts:\n # for tweet in tweet_tup:\n # print(tweet.rstrip())\n conn.close()\n return(twts)", "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? 
and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def map_tweepy_list (self, tweets):\n tweets_lists = [[tweet.created_at,\n tweet.id,\n tweet.id_str,\n tweet.truncated,\n tweet.text,\n str(constants.TRACKS),\n tweet.source,\n tweet.source_url,\n tweet.in_reply_to_status_id,\n tweet.in_reply_to_status_id_str,\n tweet.in_reply_to_user_id,\n tweet.in_reply_to_user_id_str,\n tweet.in_reply_to_screen_name,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.geo,\n tweet.coordinates,\n tweet.place,\n tweet.contributors,\n tweet.is_quote_status,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.favorited,\n tweet.retweeted,\n tweet.lang ] for tweet in tweets]\n\n return tweets_lists", "def get_tweets():\n\n return Tweet.query.all()", "def load_tweets(self, max_items=10000, user=None):\n for name, info in self.users.items():\n try:\n os.mkdir(self.root + info['party'].lower().replace(' ', '_'))\n except FileExistsError:\n pass\n \n filepath = self.root + info['party'].lower().replace(' ', '_')\n filepath = filepath + '/' + name.lower().replace(' ', '')\n try:\n print(f'Reading tweets from {name}')\n user = info['screen_name']\n curs = tweepy.Cursor(self.api.user_timeline,\n screen_name=user,\n count=200,\n tweet_mode=\"extended\"\n ).items(max_items)\n\n with open(filepath + '.jsonl', 'w') as f:\n for status in curs:\n tweet = status._json\n json_dump_line(tweet, f)\n \n except tweepy.TweepError as exc:\n print(exc)\n os.remove(filepath + '.jsonl')", "def fetch_tweets(n_tweets=100, data_home=None, token=None, tweets_ids=None):\n pass", "def get_tweets_count_times(twitter, count, query=None):\n # get id to start from\n oldest_id, newest_id = _get_oldest_id(query=query)\n newest_id = newest_id or oldest_id\n\n all_tweets = []\n i = 0\n while i < count:\n i += 1\n # use search api to request 100 tweets. 
Twitter returns the most recent (max_id) first\n if oldest_id <= newest_id:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, count=TWEETS_PER_SEARCH, twitter=twitter)\n else:\n tweets = get_tweets(query=query, max_id=oldest_id - 1, since_id=newest_id, count=TWEETS_PER_SEARCH, twitter=twitter)\n rate_limit_remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')\n rate_limit_reset = twitter.get_lastfunction_header('x-rate-limit-reset')\n\n if not len(tweets):\n # not rate limitted, just no tweets returned by query\n oldest_id = oldest_id + ((newest_id or oldest_id) - oldest_id + 1) * 10000\n break\n elif isinstance(tweets, dict):\n # rate limit hit, or other twython response error\n print(tweets)\n break\n\n all_tweets.extend(tweets)\n\n # determine new oldest id\n tweet_ids = {t['id'] for t in tweets}\n if oldest_id:\n tweet_ids.add(oldest_id)\n oldest_id, newest_id = min(tweet_ids), max(tweet_ids)\n if rate_limit_remaining == 1:\n time.sleep(rate_limit_reset - time.time())\n\n save_tweets(all_tweets, query=query)\n\n # set id to start from for next time\n _set_oldest_id(oldest_id, newest_id, query=query)\n\n if len(all_tweets) == 0:\n os.remove(make_oldest_id_path(query))\n\n return len(all_tweets), twitter.get_lastfunction_header('x-rate-limit-remaining')", "def get_tweets():\n clean_tweetdb.delay()\n db_tweets = Tweet.objects.all()\n max_id = min([tweet.tweet_id for tweet in db_tweets])\n tweets = api.search(\n q='#python',\n max_id=max_id,\n count=100\n )\n tweets_id = [tweet.id for tweet in tweets]\n tweets_date = [tweet.created_at for tweet in tweets]\n tweets_source = [tweet.source for tweet in tweets]\n tweets_favorite_cnt = [tweet.favorite_count for tweet in tweets]\n tweets_retweet_cnt = [tweet.retweet_count for tweet in tweets]\n tweets_text = [tweet.text for tweet in tweets]\n\n for i, j, k, l, m, n in zip(\n tweets_id,\n tweets_date,\n tweets_source,\n tweets_favorite_cnt,\n tweets_retweet_cnt,\n tweets_text,\n ):\n try:\n Tweet.objects.create(\n tweet_id=i,\n tweet_date=j,\n tweet_source=k,\n tweet_favorite_cnt=l,\n tweet_retweet_cnt=m,\n tweet_text=n,\n )\n except IntegrityError:\n pass", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates 
through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def get_users_tweets(users, min_date, max_date, result_limit, key, secret_key):\n \n auth = tweepy.OAuthHandler(key, secret_key)\n max_datetime = datetime.datetime.strptime(max_date, '%Y-%m-%d').date()\n min_datetime = datetime.datetime.strptime(min_date, '%Y-%m-%d').date()\n \n #initialize variables\n max_id = None\n min_id = None\n mydata = []\n\n for user in users:\n my_api = tweepy.API(auth)\n\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break\n\n while min_id == None:\n start_id = item.id\n statuses = my_api.user_timeline(screen_name=user,\n count=result_limit,\n max_id=start_id,\n tweet_mode = 'extended',\n include_retweets=True\n )\n for item in statuses: \n if item.created_at.date() > max_datetime:\n max_id = item.id\n #max_id_date = item.created_at\n elif min_datetime <= item.created_at.date() <= max_datetime:\n mydata.append(get_tweet_info(item))\n if max_id == None:\n max_id = item.id\n else: #less than min_datetime\n min_id = item.id\n #min_id_date = item.created_at\n break \n #get another 25 starting with the max... \n # if min_id is None... then call again... using the bottom of mydata as max_id...\n\n df = pd.DataFrame(mydata).loc[:,'tweet_id':'favourite_count']\n return df", "def getTwitterUsers(users,credentials=False):\n userList = ','.join(users)\n chain(twitterCall.s('lookup_user',{'screen_name':userList},credentials), pushTwitterUsers.s())()" ]
[ "0.72753966", "0.6963853", "0.695688", "0.6755268", "0.6748927", "0.67321634", "0.67147475", "0.6649648", "0.6629068", "0.661428", "0.65991986", "0.657056", "0.6557654", "0.65160143", "0.64997166", "0.6480752", "0.64145756", "0.6398489", "0.6357325", "0.6351", "0.6348374", "0.63409466", "0.62345344", "0.62174994", "0.6210045", "0.61991423", "0.6194108", "0.61681354", "0.6160788", "0.6146907" ]
0.7435396
0
Returns a single randomly selected tweet.
def choose_tweet(pos_tweets): tweet = choice(pos_tweets) return tweet
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def handler(event, context):\n send_tweet(random.choice(potential_tweets))", "def handler(event,context):\n send_tweet(random.choice(potential_tweets))", "def mock_tweet():\n count = random.randint(70, 140)\n return ''.join([random.choice(string.letters) for i in xrange(count)])", "def get_tweet(self, id):\r\n return self.tweets[id]", "def pick_word(self):\n self.chosen_word = random.choice(self.words_list)\n return self.chosen_word", "def get_random_phrase():\n return random.choices(PHRASES, WEIGHTS, k=1)[0]", "def chosen():\n wordList = loadWords()\n w = random.choice(wordList)\n word = w[:-1]\n return word", "def ajax_get_random_tweets(n):\r\n return dumps(get_random_tweets(int(n)))", "def return_word():\n wordlist = load_words()\n word = random.choice(wordlist)\n return word", "def chooseRandomSentence(self):\r\n return self.__repo.chooseObject()", "def get_random_tweets(sqlite_db, twt_tbl, auth_tbl, auth_id, num_req, rnd_seed):\n conn = sqlite3.connect(sqlite_db)\n c = conn.cursor()\n # get the number of tweets available for a given author and select threshold + 1 for experiments\n # get number of tweets\n num_twts = get_num_tweets(sqlite_db, auth_tbl, auth_id)\n # print(num_twts)\n # random seed for reproducing experimental results\n random.seed(rnd_seed)\n # list of message id's to use in testing\n message_list = random.sample(range(1, num_twts), num_req)\n print(message_list)\n # build the sql statement\n param = '?'\n params = ','.join(param*len(message_list))\n sql = \"SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID='{a_id}' AND MESSAGE_NUM IN ({prms})\".\\\n format(tn=twt_tbl, a_id=auth_id, prms=params)\n print(sql)\n # c.execute('SELECT TWEET_MSG FROM {tn} WHERE AUTHOR_ID=\"{a_id}\" AND MESSAGE_NUM IN \"{m_lst}\"'. 
\\\n # format(tn=twt_tbl, a_id=auth_id), m_lst=','.join(['?']*len(message_list)))\n c.execute(sql,message_list)\n conn.commit()\n twts = c.fetchall()\n # printing the tweets to validate selection\n # for tweet_tup in twts:\n # for tweet in tweet_tup:\n # print(tweet.rstrip())\n conn.close()\n return(twts)", "def get_randword():\n with open('/home/sarga/text_words.txt','r') as f:\n rword = f.read().split(\" \")\n return random.choice(rword)", "def get_five_random(self):\r\n if self.get_length() > 5:\r\n random_selection = []\r\n\r\n from random import randrange\r\n\r\n for i in range(0, 5):\r\n while True:\r\n rnd = randrange(0, self.get_length())\r\n if self.get_tweet(rnd) not in random_selection:\r\n random_selection.append(self.get_tweet(rnd))\r\n break\r\n return random_selection\r\n else:\r\n return self.tweets", "def get_random_phrase(self):\n return random.choice(self.phrases)", "def pick_random_word():\r\n # open the sowpods dictionary\r\n with open(\"resources/ex30/sowpos.txt\", 'r') as f:\r\n words = f.readlines()\r\n\r\n # generate a random index\r\n # -1 because len(words) is not a valid index into the list `words`\r\n index = random.randint(0, len(words) - 1)\r\n\r\n # print out the word at that index\r\n word = words[index].strip()\r\n return word", "def get_random_string(t):\n return t[random.randint(0, len(t)-1)]", "def randomWord(wordList):\n return random.choice(wordList)", "def randomWord(wordList):\n return random.choice(wordList)", "def randomWord(wordList):\n return random.choice(wordList)", "def get_word(self):\r\n\r\n # Get a unique word anyway\r\n if not self.word_count or random.random() > self.new_word_chance:\r\n self.word_count += 1\r\n return self.create_word() \r\n else:\r\n word_choice = random.randrange(0, self.word_count)\r\n try:\r\n return self.words[word_choice]\r\n except IndexError:\r\n return self.create_word()", "def get_random_tweets(n):\r\n sample = list(mongo_coll_tweets.aggregate([{'$sample': {'size': n}}]))\r\n\r\n return sample", "def get_random_word(self):\n pass", "def random_sent(self, index):\n t1, t2 = self.get_corpus_line(index)\n if random.random() > 0.5:\n label = 0\n else:\n try:\n t2_temp = self.get_random_line()\n t2 = t2_temp\n label = 1\n except:\n label = 0\n\n assert len(t1) > 0\n assert len(t2) > 0\n return t1, t2, label", "def one(self):\n count = self.aggregate(count=Count('id'))['count']\n\n if count < 2:\n raise ValueError('not enough words')\n\n idx = randint(0, count - 1)\n\n return self.all()[idx]", "def random_word(wordlist):\n return random.choice(wordlist)", "def get_tweet(username, n):\n return twitterAPI.home_timeline(count=n)[-1:][0] # return specified tweet", "def get_word():\n index = random.randrange(3)\n print(index)\n if index == 0:\n return 'HAPPIE'\n elif index == 1:\n return 'PYTHON'\n else:\n return 'COMPUTER'", "def getRandom(self):\n return random.choice(self.data)", "def get_random_song(self):\n return random.choice(self.song_list)" ]
[ "0.69579417", "0.69579417", "0.69258714", "0.6577981", "0.63306475", "0.6282631", "0.6218992", "0.6208195", "0.6181834", "0.6129136", "0.61065483", "0.60549045", "0.6049556", "0.60462", "0.6039426", "0.6018565", "0.5995151", "0.59862554", "0.59862554", "0.59862554", "0.59790576", "0.59604895", "0.59547687", "0.5908486", "0.590341", "0.5895983", "0.58846974", "0.5809524", "0.57854605", "0.57289964" ]
0.7728843
0
Authenticated user likes all tweets in pos_tweets.
def like_tweets(pos_tweets): for tweet in pos_tweets: twitter.CreateFavorite(status_id=tweet.id) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def like_tweet(self, tag):\n self.bot.get('https://twitter.com/search?q=' + tag + '&src=typed')\n self.__wait(3, 3)\n for i in range(1, 3):\n self.bot.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n self.__wait(2, 3)\n tweets = self.bot.find_elements_by_tag_name('article')\n\n links = []\n for tweet in tweets:\n sub_links = tweet.find_elements_by_tag_name('a')\n links += [sub_link.get_attribute('href')\n for sub_link in sub_links if 'status' in sub_link.get_attribute('href')]\n\n print('Started to like {} tweets'.format(len(links)))\n\n for link in links:\n self.bot.get(link)\n self.__wait(3, 5)\n likes = self.bot.find_elements_by_css_selector('div[data-testid=\"like\"')\n for like in likes:\n like.click()\n self.__wait(3, 5)", "def filter_pos_tweets(tweets):\n\n pos_tweets = []\n\n for tweet in tweets:\n sentiment = unirest.post(\"https://japerk-text-processing.p.mashape.com/sentiment/\",\n headers={\n \"X-Mashape-Key\": os.environ['X_MASHAPE_KEY'],\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"\n },\n params={\n \"language\": \"english\",\n \"text\": tweet.text\n }\n )\n if (sentiment.body['probability']['neg'] <= max_neg) & (sentiment.body['probability']['pos'] >= min_pos):\n pos_tweets.append(tweet)\n log_sentiment(tweet, sentiment)\n\n return pos_tweets", "def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))", "def like_tweet(tweet_id):\n twitter.create_favorite(id=tweet_id)", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. 
\\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def get_likes_list(self, username):\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})", "def users_being_followed_tweets():\n username = request.authorization.username\n tweets = []\n\n user_id = get_user_id(username);\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id and (\n user.user_id = ? or\n user.user_id in (select whom_id from follower\n where who_id = ?))\n order by message.pub_date desc limit ?''',\n [user_id, user_id, PER_PAGE])\n\n for tuple in tuples:\n tweet = {}\n tweet[\"message_id\"] = tuple['message_id']\n tweet[\"author_id\"] = tuple['author_id']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweets.append(tweet)\n\n return jsonify({'tweets': tweets}), 200", "def like_user_posts(self, user:str, n_posts:int, like:bool=True):\n\n action = 'Like' if like else 'Unlike'\n\n self._nav_user(user)\n\n imgs = []\n elements = self._find_element(EC.presence_of_all_elements_located((By.CLASS_NAME, '_9AhH0')))\n imgs.extend(elements)\n\n for img in imgs[:n_posts]:\n img.click() \n time.sleep(1) \n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action)).click()\n except Exception as e:\n LOGGER.error(e)\n\n self.driver.find_elements_by_class_name('ckWGn')[0].click()", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)", "def get_tweets(self, query, count=10):\n # empty list to store parsed tweets\n tweets = []\n\n try:\n # call twitter api to fetch tweets\n fetched_tweets = 
self.api.search(q=query, count=count)\n\n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n\n # saving text of tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n\n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n\n # return parsed tweets\n return tweets\n\n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))", "def analyze_tweets(tweets, model, w2v_model):\n # TODO DO EVERYTHING HERE\n #tweets = [(\"StarWars\", tc.query_tweets(\"StarWars\"))]\n \n #tweets = tc.query_tweets('starwars')\n df = pd.DataFrame(columns=['pos', 'neu', 'neg'])\n if not os.path.isdir('results'):\n os.mkdir('results')\n for topic, topic_tweets in tweets:\n tokenized_tweets = tp.process_raw_tweets(topic_tweets)\n df.loc[topic], dummy = classify_tweets(tokenized_tweets, model, w2v_model)\n vis.word_cloud_from_frequencies(tp.count_tokens(tokenized_tweets), f\"results/{topic}_cloud.png\", width=800, height=400,)\n \n vis.bar_plot_from_dataframe(df, 'results/results.png')\n print(\"\\n\")\n print(df)", "def get_all_likes(obj):\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? 
and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200", "def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)", "def post(self):\n liked = self.request.get('like')\n unliked = self.request.get('unlike')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if liked:\n if user_id in post.liked_by:\n self.render_improper_endpoint_access(\"like\")\n else:\n if post.submitter_id != user_id:\n post.liked_by.append(user.key().id())\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)\n elif unliked:\n if user_id in post.liked_by:\n index = post.liked_by.index(user_id)\n del post.liked_by[index]\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(500)", "def like(self, data_base, user):\n cursor = data_base.cursor()\n cursor.execute(f\"UPDATE post SET likes = likes + 1 WHERE id = '{self.id}'\") # Increments the likes\n cursor.execute(f\"INSERT INTO user_like (user_id, post_id) VALUES ({user.id}, {self.id})\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def ListLikes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_user_likes(self) -> int:\n return -1", "def send_like(request):\n if request.method == \"POST\":\n if \"token\" in request.data and request.data[\"token\"] != \"\" and request.data[\"token\"] is not None:\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n token = get_object_or_404(Token, key=request.data[\"token\"])\n if Post.objects.filter(pk=request.data[\"post_id\"]).exists():\n post = Post.objects.get(pk=request.data[\"post_id\"])\n if Like.objects.filter(post=post, user_id=token.user_id).exists():\n return Response({\"error\": 31})\n else:\n post.count_likes += 1\n post.save()\n like = Like.objects.create(post=post, user_id=token.user_id)\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n UserFeed.objects.create(user=post.author,\n action_user=token.user,\n like=like,\n action=\"Like\")\n if post.author != token.user:\n message = \"{} likes your post\".format(token.user.username)\n custom = {\n \"post_id\": post.id,\n \"avatar\": 
UserProfile.objects.get(user=token.user).avatar.url\n }\n\n user_notification = UserNotification.objects.get(user=post.author)\n send_notification(custom, message, user_notification)\n\n return Response({\"success\": 30,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 32})\n else:\n return Response({\"error\": 17})", "def list_tweets():\n tweets = []\n tuples = query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE])\n for tuple in tuples:\n tweet = {}\n tweet[\"username\"] = tuple['username']\n tweet[\"email\"] = tuple['email']\n tweet[\"text\"] = tuple['text']\n tweet[\"pub_date\"] = tuple['pub_date']\n tweets.append(tweet)\n return jsonify({'tweets':tweets}),200", "def set_analyzed_tweets(self, tweets):\n slim_tweets = [SlimTweet(tweet) for tweet in tweets]\n self.analyzed_tweets = sort_tweets(slim_tweets)", "def show_likes(user_id):\n\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user whose favorites are being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged-in user for navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if session[CURRENT_USER_KEY] == user_id:\n like_active = 'active'\n else:\n like_active = ''\n\n return render_template('likes.html', user=user, profuser=profuser, likes=profuser.likes, like_active=like_active)", "def displayAlsoLike(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"11925205\", result.data)", "def get_user_liked(user, status):\n return models.Favorite.objects.filter(user=user, status=status).exists()", "def like_following(self):\n self.logger.log(\"starting like_following...\")\n count_following = self.account.follows_count\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n acc = perform_with_ran_delay(self.instagram.get_account_by_id, acc)\n self.logger.log(\" {} > {} posts\".format(acc.username, acc.media_count))\n if acc.media_count > 0:\n\n posts = perform_with_ran_delay(self.instagram.get_medias, acc.username, 50)\n if posts:\n for m in posts:\n try:\n perform_with_ran_delay(self.instagram.like, m.identifier)\n self.logger.log(\"liking 1 post from \"+acc.username)\n random_delay()\n except Exception as e:\n self.logger.log(\"skipping 1 post from \"+acc.username)\n self.logger.log(e)\n random_delay()\n continue", "def show_likes(user_id):\n\n\n user = User.query.get_or_404(user_id)\n\n return render_template('users/likes.html', user=user)", "def is_liked(obj, user) ->bool:\n\tif not user.is_authenticated:\n\t\treturn False\n\tobj_type = ContentType.objects.get_for_model(obj):\n\tlikes = Like.objects.filter(\n\t\tcontent_type = obj_type, object_id=obj.id, user=user)\n\treturn likes.exists()\n\n\tdef get_all_likes(obj):\n\t\t\"\"\"\n\t\t\tGets all users, who liked object\n\t\t\"\"\"\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def latest_likes(self, user, number_posts, likes):\n WAIT = 1\n if likes:\n action = 'Like'\n else:\n action = 'Unlike'\n self.nav_user(user)\n image_container = []\n image_container.extend(self.driver.find_elements_by_class_name('_9AhH0'))\n for image in image_container[:number_posts]:\n image.click()\n time.sleep(WAIT)\n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action).click())\n except Exception as e:\n print(e)\n 
self.driver.find_elements_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]/button')[0].click() # clicks the heart symbol\n time.sleep(WAIT)\n self.driver.find_elements_by_xpath('/html/body/div[4]/div[3]/button')[0].click() #Makes sure to close out of current picture\n time.sleep(WAIT)\n \n # Tested\n users_list = []\n def get_likes_list(self, username):\n \"\"\"\n Method gets a list of users who like a post\n\n \"\"\"\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})" ]
[ "0.64643073", "0.6083664", "0.60599387", "0.59699297", "0.5918097", "0.5902641", "0.58915097", "0.57170844", "0.56932944", "0.56672335", "0.56636506", "0.56403744", "0.5627154", "0.562145", "0.56145626", "0.55933243", "0.5592352", "0.5581902", "0.5573323", "0.5570764", "0.5568061", "0.5562018", "0.55527556", "0.55165535", "0.5502987", "0.54911464", "0.5491007", "0.547235", "0.5471614", "0.54681313" ]
0.79324496
0
Authenticated user retweets tweet.
def retweet(tweet): twitter.PostRetweet(tweet.id, trim_user=False) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retweet(tweet_id):\n r = requests.post(twitter_api_base + \"/statuses/retweet/\" +\n tweet_id + \".json\",\n auth=oauth_credentials)\n if r.status_code != 200:\n print(\"Attempted to retweet tweet %s\" % tweet_id)\n received_error(r)\n else:\n print(\"Successfully retweeted tweet %s.\" % tweet_id)", "def get(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n api = load_api(request)\n try:\n my_tweets = api.user_timeline()\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n tweet_list = []\n for tweet in my_tweets:\n tweet_list.append(tweet.text)\n return Response({'message': tweet_list}, status=status.HTTP_200_OK)\n else:\n return Response({\"message\": \"Kindly create an account and log in first\"},\n status=status.HTTP_400_BAD_REQUEST)", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n print(e.message)\n else:\n log(\"Tweeted: \" + text)\n print(\"Tweeted: \" + text)", "def tweet(self, tweet, at=None):\n if tweet.strip() == \"\":\n return\n\n num_tweets, tweets = self._divide_tweet(tweet, at)\n if num_tweets > 0:\n # replace @'s with #'s and convert unicode emojis before tweeting\n [self.api.update_status(tw.replace(\"@\", \"#\").encode(\"utf-8\")) for tw in tweets]\n self.log(f\"Tweeted: {' '.join(tweets)}\")\n return tweets[0]", "def tweet(text):\n # Twitter authentication\n auth = tweepy.OAuthHandler(C_KEY, C_SECRET)\n auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n log(e.message)\n else:\n log(\"Tweeted: \" + text)", "def rt(result):\n #Works only on real retweets (no replies with RT chain)\n return result.retweeted", "def tweet(message):\n auth = load_twitter_auth()\n key = environ['TWITTER_ACCESS_KEY']\n secret = environ['TWITTER_ACCESS_SECRET']\n auth.set_access_token(key, secret)\n api = tweepy.API(auth)\n api.update_status(message)\n print(message)", "def execute(self, *args, **kwargs):\n try:\n self.timeline_tweets = self.api.user_timeline(\n kwargs['screen_name'])\n except TweepError as user_timeline_error:\n print(user_timeline_error)\n self.user_timeline_tweets_status = False", "def post(self, request):\n if request.user.is_authenticated:\n if not request.user.consumer_key and not request.user.consumer_secret and not request.user.oauth_token and \\\n not request.user.oauth_token_secret:\n return Response({\"message\": \"Kindly supply the twitter authentication keys in the admin dashboard\"},\n status=status.HTTP_400_BAD_REQUEST)\n else:\n tweets = request.data.get('tweets', None)\n if tweets is not None:\n api = load_api(request)\n try:\n api.update_status(tweets)\n except tweepy.TweepError as e:\n return Response({\"message\": e.args[0][0]['message']}, status=status.HTTP_400_BAD_REQUEST)\n return Response({\"message\": \"Your tweets has been updated\"}, status=status.HTTP_201_CREATED)", "def 
get_tweets(api):\n return api.user_timeline()", "def tweet(self):\n self.__refresh_local_tweets()\n\n if not self.tweets:\n return\n\n tweet_obj = self.tweets[0]\n\n # upload picture\n media_id = self.__upload_media(tweet_obj[\"img\"])\n\n # tweet with text, and image\n if not media_id:\n return\n self.__post_status(tweet_obj[\"text\"], media_id)\n\n self.tweets.remove(tweet_obj)\n self.tweeted.append(tweet_obj)\n self.__update_local_tweets()", "def TweetHandler(self):\n self.response.out.write('<br/><br/>Tweeting<br/>')\n self.response.out.write('this info will be tweeted:<br/>')\n # oldest non-tweeted and prepared\n oldest_changeset = Changeset.all().order('created_at').filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)\n if not oldest_changeset:\n self.response.out.write('nothing to tweet')\n return\n else:\n c = oldest_changeset[0]\n \n config = get_config()\n\n # do not tweet from localhost\n if not 'localhost' in self.request.url:\n auth = tweepy.OAuthHandler(config[\"consumer_key\"], config[\"consumer_secret\"])\n auth_data = OAuthAccessToken.all().filter('specifier =', config[\"twitter_username\"]).fetch(1)[0]\n auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)\n self.response.out.write('<br/>tweeting with oauth:<br/>')\n api = tweepy.API(auth)\n self.response.out.write(\"id: %d\" % c.id)\n self.response.out.write(\"user: %s\" % c.user)\n self.response.out.write(\"comment: %s\" % c.comment)\n self.response.out.write(\"tweet: %s\" % c.tweet)\n try:\n api.update_status(c.tweet)\n except tweepy.error.TweepError, e: \n self.response.out.write( 'failed: %s' % e.reason )\n if \"Status is a duplicate\" in e.reason:\n c.is_tweeted = True\n c.put()\n return\n else:\n self.response.out.write('<br/>localhost - nothing actually tweeted:')\n\n self.response.out.write('<br/>%s' % c.tweet)\n\n c.is_tweeted = True\n c.put()", "def get_tweets(self):\r\n return self.tweets", "def on_tweet(self, tweet):\n pass", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "async def get_tweets(self, ctx, username: str, count: int):\n cnt = count\n if count > 25:\n cnt = 25\n\n if username is not None:\n if cnt < 1:\n await self.bot.say(\"I can't do that, silly! Please specify a \\\n number greater than or equal to 1\")\n return\n msg_list = []\n api = self.authenticate()\n try:\n for status in\\\n tw.Cursor(api.user_timeline, id=username).items(cnt):\n if not status.text.startswith(\"@\"):\n msg_list.append(status)\n except tw.TweepError as e:\n await self.bot.say(\"Whoops! Something went wrong here. \\\n The error code is \" + str(e))\n return\n if len(msg_list) > 0:\n await self.tweet_menu(ctx, msg_list, page=0, timeout=30)\n else:\n await self.bot.say(\"No tweets available to display!\")\n else:\n await self.bot.say(\"No username specified!\")\n return", "def tweet(user):\n api = get_api(user)\n msg = 'I used hackt to follow @hackerschool batches on twitter. 
You can too at http://bit.ly/hs_hackt'\n\n try:\n api.PostUpdate(msg)\n except twitter.TwitterError as error:\n return {'msg': error.message[0]['message']}", "def send_tweet(tweet_text):\n twitter.update_status(status = tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)", "def handler(event,context):\n tweet = setup_and_get_tweet()\n send_tweet(tweet)", "def twitter_login():\n auth = twitter.OAuth(access_token, access_token_secret, consumer_key, consumer_secret)\n return twitter.Twitter(auth=auth, retry=True)", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def command_tweet(self, bot, update):\n\n bot.sendChatAction(update.message.chat_id, action='typing')\n\n tweet = ext.get_last_tweet(self.config['twitter'])\n\n for url in tweet.get('images', []):\n self.send_photo_url(bot, update, url)\n\n messages = [\n u'{text}',\n '[@{user[screen_name]}](https://twitter.com/{user[screen_name]}) '\n '- {ago}'\n ]\n\n for msg in messages:\n self.send_message(bot, update, msg.format(**tweet))", "def on_success(self, status):\n # print(status['text'], status['id'])\n if is_wcw(status): # Use function for testing the phrase\n try:\n api.retweet(id=status['id'])\n except TwythonError:\n pass", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. 
{len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def get_tweets(self, user, count):\n topTweetsList = self.api.user_timeline(screen_name=user, count=count, tweet_mode='extended')\n clnTweets = {}\n for tweet in topTweetsList:\n clnTweets[processTweet(getNonRetweet(tweet))] = ({'like':getFavoriteCount(tweet),'RT':getNumRetweet(tweet),'follower':getNumFollowers(tweet)}) \n\n tweetTxt = [twt for twt in clnTweets.keys()]\n \n if user in self.userTweetsStat:\n self.userTweetsStat[user].append(clnTweets)\n else:\n tmp = []\n tmp.append(clnTweets)\n self.userTweetsStat[user] = tmp\n return tweetTxt, self.userTweetsStat", "def reply_to_tweets():\n last_seen_id = retrieve_last_seen_id(FILE_NAME)\n mentions = api.mentions_timeline(\n last_seen_id,\n tweet_mode='extended')\n\n for mention in reversed(mentions):\n print(str(mention.id) + ' - ' + mention.full_text, flush=True)\n last_seen_id = mention.id\n store_last_seen_id(last_seen_id, FILE_NAME)\n for i in range(len(keywords)):\n if keywords[i] in mention.full_text.lower():\n print(\"responding back to: \" + '@' +\n mention.user.screen_name, flush=True)\n api.update_status('@' + mention.user.screen_name + ' ' +\n salts[i], mention.id)" ]
[ "0.66826576", "0.66580725", "0.664445", "0.6615652", "0.6569027", "0.6567879", "0.6546092", "0.65177256", "0.65040976", "0.6480737", "0.645635", "0.64364004", "0.6425227", "0.64131695", "0.6408457", "0.63748854", "0.63633084", "0.63615364", "0.63195086", "0.63195086", "0.63195086", "0.63019854", "0.6286318", "0.6260644", "0.6215908", "0.6204038", "0.620328", "0.61799616", "0.616489", "0.6163183" ]
0.76832384
0
Wrap an html code str inside a div.
def add_div_around_html(div_html_text, output_string=False, div_style="{width: 80%}"):
    div = f"""<div style="{div_style}">{div_html_text}</div>"""
    if output_string:
        return div #get_ipython().set_next_input(div, 'markdown')
    else:
        return Markdown(div)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_div(string, cls):\n return html_simple_element(string, \"div\", 'class=\"%s\"' % cls) + \"\\n\"", "def get_html(html: str):\r\n WRAPPER = \"\"\"<div style=\"overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem\">{}</div>\"\"\"\r\n # Newlines seem to mess with the rendering\r\n html = html.replace(\"\\n\", \" \")\r\n return WRAPPER.format(html)", "def as_iframe(self, html_data):\n\n srcdoc = html_data.replace('\"', \"'\")\n return ('<iframe id=\"{div_id}\", srcdoc=\"{srcdoc}\" style=\"width: {width}; '\n 'height: {height};\"></iframe>'.format(\n div_id=self.div_id,\n srcdoc=srcdoc,\n width=self.width,\n height=self.height))", "def wrap(self, source, outfile):\r\n return self._wrap_div(self._wrap_pre(self._wrap_code(source)))", "def htmlise(s):\n return '<div><pre class=\"tablecell\">' + html.escape(s) + '</pre></div>'", "def NewDiv(width=default_width, height=default_height):\n\n global wid\n wid = uuid.uuid4().hex\n print('Display id = {}JS9'.format(wid))\n fmt = dict(url=default_root, port0=default_port_html, wid=wid, width=width, height=height)\n html_command = \"\"\"\n <iframe src='{url}:{port0}/{wid}' width='{width}' height='{height}'>\n </iframe>\n \"\"\".format(**fmt)\n get_ipython().run_cell_magic('html', '', html_command)", "def htmlDiv(id, contents='', attr='', keepEmptyDiv=True):\n if contents or keepEmptyDiv:\n if id:\n return '<div id=\"%s\"%s>\\n%s</div>\\n' % (id,sep(attr),contents)\n else:\n return '<div%s>\\n%s</div>\\n' % (sep(attr),contents)\n else:\n return ''", "def _wrap_code(self, inner):\r\n yield 0, \"<code>\"\r\n for tup in inner:\r\n yield tup\r\n yield 0, \"</code>\"", "def inject_on_div(div_id, original, content_to_inject):\n m = re.match(\".*(<div.*id=\\\"\"+ div_id +\"\\\".*>)(.*)(</div>).*\", original, re.DOTALL)\n return re.sub(m.group(1) + m.group(2) + m.group(3), m.group(1) + content_to_inject + m.group(3) , original)", "def wrap(self, wrap):\n return f\"| {wrap} |\"", "def _render_response_msg_html(self, response_msg):\r\n # First try wrapping the text in a <div> and parsing\r\n # it as an XHTML tree\r\n try:\r\n response_msg_div = etree.XML('<div>%s</div>' % str(response_msg))\r\n\r\n # If we can't do that, create the <div> and set the message\r\n # as the text of the <div>\r\n except:\r\n response_msg_div = etree.Element('div')\r\n response_msg_div.text = str(response_msg)\r\n\r\n # Set the css class of the message <div>\r\n response_msg_div.set(\"class\", \"response_message\")\r\n\r\n return response_msg_div", "def htmlText(text, attr='', escapeText=False):\n return '<div%s>%s</div>\\n' % (sep(attr),escape(text) if escapeText else text)", "def html_tag(tag):\n def wrap_text(msg):\n print(\"<{0}>{1}</{0}>\".format(tag, msg))\n\n return wrap_text", "def html_tag(tag):\n def wrap_text(msg):\n print(\"<{0}>{1}</{0}>\".format(tag, msg))\n return wrap_text", "def wrap(body):\n return \"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"> \n <title>OpenWhisk Crud Demo</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <link rel=\"stylesheet\" href=\"%s\">\n <script src=\"%s\"></script>\n <script src=\"%s\"></script>\n </head>\n <body>\n <div class=\"container\">%s</div>\n </body>\n</html>\n\"\"\" % (BOOTSTRAP_CSS, JQUERY, BOOTSTRAP_JS, body)", "def draw_description(html: str, ref) -> None:\n if html is not None:\n ref.current.innerHTML = html", "def codetag(txt):\n return \"<code>%s<code>\" % (txt, )", "def html_error(string):\n return 
html_div(string, \"error\")", "def _repr_html_(self):\n if self.container_id():\n return \"<i>This widget is already shown in this notebook</i>\"\n \n container_id = self.id + '_container'\n def set_cointainer_id():\n self.container_id._set(container_id)\n # Set container id, this gets applied in the next event loop\n # iteration, so by the time it gets called in JS, the div that\n # we define below will have been created.\n from ..app import call_later\n call_later(0.1, set_cointainer_id) # todo: always do calls in next iter\n return \"<div class='flx-container' id=%s />\" % container_id", "def mdhtml_to_html(data_str):\n mdrenderer = mistune.Renderer()\n markdown = mistune.Markdown(renderer=mdrenderer)\n return markdown(data_str)", "def output_to_html(string_data):\n raise NotImplementedError(\"This function is not yet Implemented!\")", "def from_html(html_code, **kwargs):\n ...", "def wrap_string(input_str):\r\n return textwrap.wrap(input_str, 80)", "def vwrap(self, elem: str, attrs: str = \"\") -> \"PyHTML\":\n if elem not in PyHTML.VOID_ELEMENTS:\n raise ValueError(f\"Use `wrap` for non-void element: {elem}\")\n self.append(self._open_tag(elem, attrs))\n return self", "def inline_html(self, html):\n if self.options.get('escape'):\n return [escape(html)]\n return [html]", "def add_pagewrap(resume_output):\n # adds the open tag of page-wrap to list of lines representing html output\n resume_output.extend(['<div id=\"page-wrap\">'])\n\n # returns output code\n return resume_output", "def store(self, html, safe=False):\r\n self.rawHtmlBlocks.append((html, safe))\r\n placeholder = self.get_placeholder(self.html_counter)\r\n self.html_counter += 1\r\n return placeholder", "def generate_html(self):\n html_text_1 = \"\"\"\n <div class=\"concept\">\n\n \t\t<div class=\"concept-title\">\n\n \t\t\t\t\"\"\" + self.title\n\n html_text_2 = \"\"\"\n \t\t</div>\n\n \t\t<div class=\"concept-description\">\n\n\t\t <p>\n\t\t\t\n \t\t \t\t\"\"\" + self.description + \"\"\" \n \n </p>\"\"\"\n\n html_text_3 = '''\n\n \t\t</div>\n\n </div>'''\n\n return html_text_1 + html_text_2 + html_text_3", "def __init__(\n self, code: str = \"\", language: str = \"python\", sizing_mode=\"stretch_width\", **kwargs\n ):\n code_markdown = f\"\"\"\n```{language}\n{code}\n```\n\"\"\"\n code_html = markdown.markdown(\n code_markdown, extensions=MARKDOWN_EXTENSIONS, output_format=\"html5\"\n )\n super().__init__(code_html, sizing_mode=sizing_mode, **kwargs)", "def html_wrapper(content):\n\n header = '''<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <title>''' + SITE_NAME + '''</title>\n</head>\n<body>\n'''\n\n footer = '''\n</body>\n</html>'''\n return header + content + footer" ]
[ "0.68858933", "0.67504036", "0.6205391", "0.6190861", "0.5886693", "0.58730954", "0.5765113", "0.56943285", "0.568877", "0.56636864", "0.5630165", "0.55836976", "0.5572767", "0.5570544", "0.5496482", "0.5484527", "0.5480989", "0.5466087", "0.5461017", "0.5433532", "0.5414356", "0.5396884", "0.5379434", "0.52972305", "0.5286801", "0.52745676", "0.52736247", "0.52639323", "0.5246382", "0.5215968" ]
0.72967833
0
Update base branch and rebase topic branch.
def update_base_branch(self):
    # Make sure base branch is up to date
    print("Checking out base branch '{}'...".format(self.base_branch))
    self.git.checkout(self.base_branch)
    print('Updating base branch...')
    self.git.pull('--rebase')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rebase_topic_branch_and_push(self):\n # Rebase topic branch\n print('Checking out topic branch..')\n self.git.checkout(self.topic_branch)\n print('Updating topic branch with work from base branch...')\n self.git.rebase(self.base_branch)\n\n # Push rebased version (so it'll get marked as merged later if on\n # Github)\n print('Pushing updated topic branch...')\n self.git.push('--force')", "def merge_and_cleanup(self):\n print('Checking out base branch and merging topic branch...')\n self.git.checkout(self.base_branch)\n self.git.merge('--ff-only', self.topic_branch)\n\n # Push merge and delete topic branch\n print('Pushing base branch with topic branch merged...')\n self.git.push()\n print('Deleting remote topic branch...')\n self.git.push('origin', ':{}'.format(self.topic_branch))\n\n # Optionally delete local topic branch\n if self.delete_local:\n print('Deleting local topic branch...')\n self.git.branch('-D', self.topic_branch)", "def __init__(self, base_branch, topic_branch=None, delete_local=False):\n super(TopicMerge, self).__init__(base_branch)\n\n self.topic_branch = topic_branch\n self.delete_local = delete_local\n\n if not topic_branch:\n self.topic_branch = self.active_branch()\n print(\"Using active branch '{}' for topic branch.\".format(self.topic_branch))\n\n if self.topic_branch == self.base_branch:\n raise Exception(\"Topic branch and base branch shouldn't be the same.\")", "def update(self, branch=None):\n if branch is None:\n branch = self.branch\n\n print \"*** Updating to branch '%s'\" % branch\n commands.pull(ui.ui(), self._repository, self.url)\n commands.update(ui.ui(), self._repository, None, branch, True)", "def pull(ctx, path_base):\n with ctx.cd(path_base):\n ctx.run('git reset --hard')\n ctx.run('git pull origin master')", "def rebase(self):\n self.cm.rebase()", "def svn_rebase():\n output = str(git.svn.rebase()).strip()\n if not output.endswith('Current branch master is up to date.'):\n print('\"' + output + '\"')", "def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)", "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . 
\" + main_branch + \":\" + name)", "def update_branch(branch, repo, options):\n update = None\n\n remote = repo.get_merge_branch(branch)\n if not remote:\n gbp.log.warn(\"No branch tracking '%s' found - skipping.\" % branch)\n return False\n\n can_fast_forward, up_to_date = repo.is_fast_forward(branch, remote)\n\n if up_to_date: # Great, we're done\n gbp.log.info(\"Branch '%s' is already up to date.\" % branch)\n return True\n\n if can_fast_forward:\n update = 'merge'\n else:\n if options.force == 'merge':\n gbp.log.info(\"Non-fast forwarding '%s' due to --force=merge\" % branch)\n update = 'merge'\n elif options.force == 'clean':\n gbp.log.info(\"Checking out clean copy of '%s' due to --force=clean\" % branch)\n update = 'clean'\n else:\n gbp.log.warn(\"Skipping non-fast forward of '%s' - use --force or \"\n \"update manually\" % branch)\n\n if update:\n gbp.log.info(\"Updating '%s'\" % branch)\n if repo.branch == branch:\n if update == 'merge':\n repo.merge(remote)\n elif update == 'clean':\n # Have to drop our current branch\n tmpbranch = \"_gbptmp-\"+branch\n gbp.log.debug(\"Checking out '%s' to '%s'\" % (remote, tmpbranch))\n repo.create_branch(tmpbranch, remote)\n gbp.log.debug(\"Switching current branch to '%s'\" % (tmpbranch))\n repo.set_branch(tmpbranch)\n gbp.log.debug(\"Dropping branch '%s'\" % branch)\n repo.delete_branch(branch)\n gbp.log.info(\"Renaming branch '%s' to '%s'\" % (tmpbranch, branch))\n repo.rename_branch(tmpbranch, branch)\n else:\n if can_fast_forward or (update == 'clean'):\n sha1 = repo.rev_parse(remote)\n repo.update_ref(\"refs/heads/%s\" % branch, sha1,\n msg=\"gbp: forward %s to %s\" % (branch, remote))\n elif update == 'merge':\n # Merge other branch, if it cannot be fast-forwarded\n current_branch=repo.branch\n repo.set_branch(branch)\n repo.merge(remote)\n repo.set_branch(current_branch)\n\n return (update != None)", "def test_reset_to_remote_after_rebase(self) -> None:\n (\n self.repo_sandbox\n .new_branch(\"branch-0\")\n .commit()\n .push()\n .new_branch(\"branch-1\")\n .commit()\n .push()\n .check_out(\"branch-0\")\n .commit()\n )\n rewrite_branch_layout_file(\"branch-0\\n\\tbranch-1\")\n\n with fixed_author_and_committer_date_in_past():\n assert_success(\n [\"traverse\", \"-y\"],\n \"\"\"\n Pushing branch-0 to origin...\n\n Checking out branch-1\n\n branch-0\n |\n x-branch-1 *\n\n Rebasing branch-1 onto branch-0...\n\n Branch branch-1 diverged from (and has older commits than) its remote counterpart origin/branch-1.\n Resetting branch branch-1 to the commit pointed by origin/branch-1...\n\n branch-0\n |\n x-branch-1 *\n\n Reached branch branch-1 which has no successor; nothing left to update\n \"\"\"\n )", "def _switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)", "def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def update_changelog(package_id: str, base_branch: str, verbose: bool):\n if 
_update_changelog(package_id, base_branch, verbose, True):\n sys.exit(64)", "def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)", "def check_out_topic_branch_from_remote(self):\n self.git.checkout('-b', self.topic_branch, '{}/{}'.format(self.base_branch_remote(), self.topic_branch))", "def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)", "def update_rc_branch(ctx, mainline, rc):\n repo = ctx.obj\n rc = try_context(repo, rc, \"rc\", \"rc_ref\")\n\n if mainline == rc:\n raise ValueError(\"Specifying the same branch for mainline and rc\"\n \" will result in dataloss. The mainline branch\"\n \" will be deleted, then the rc branch will be\"\n \" created from the now non-existent mainline branch\")\n\n branch_protection_enabled = False\n\n # check if branch exists\n if rc in (b.name for b in repo.iter_branches()):\n logging.debug(\"Branch {} exists\".format(rc))\n # rc branch exists\n branch_protection_response = branch_api_request(repo, rc, 'GET')\n if branch_protection_response.status_code == 200:\n # rc branch exists and protection enabled\n logging.debug(\"Branch {branch} has protection enabled,\"\n \" config: {bp_config}\"\n .format(branch=rc,\n bp_config=branch_protection_response.json()))\n branch_protection_enabled = True\n # disable branch protection\n r = branch_api_request(repo, rc, 'DELETE')\n r.raise_for_status()\n logging.debug(\"Branch protection disabled\")\n elif branch_protection_response.status_code == 404:\n # rc branch exists without protection, so it doesn't need\n # to be disabled\n # TODO: create jira issue about unprotected branch?\n pass\n else:\n # failure retrieving branch protection status\n branch_protection_response.raise_for_status()\n\n # Delete branch\n r = repo._session.request(\n 'DELETE',\n repo.git_refs_urlt.expand(sha=\"heads/{}\".format(rc)))\n r.raise_for_status()\n logging.debug(\"Branch {} deleted\".format(rc))\n\n mainline_sha = repo.branch(mainline).commit.sha\n logging.debug(\"Mainline SHA: {}\".format(mainline_sha))\n\n # create rc branch pointing at head of mainline\n repo.create_ref(\"refs/heads/{}\".format(rc), mainline_sha)\n logging.debug(\"Branch {} created\".format(rc))\n\n # Skeleton branch protection data, used to protect a new branch.\n protection_data = {\n \"required_status_checks\": None,\n \"enforce_admins\": True,\n \"required_pull_request_reviews\": {\n \"dismissal_restrictions\": {},\n \"dismiss_stale_reviews\": False,\n \"require_code_owner_reviews\": False\n },\n \"restrictions\": None\n }\n\n # Incorporate previous branch protection data if the branch was\n # protected perviously\n if branch_protection_enabled:\n stored_bpd = branch_protection_response.json()\n protection_data.update(stored_bpd)\n # The github api returns enforce_admins as dict, but requires it to\n # be sent as a bool.\n protection_data['enforce_admins'] \\\n = stored_bpd['enforce_admins']['enabled']\n\n # Enable branch protection\n r = branch_api_request(repo, rc, 'PUT',\n data=json.dumps(protection_data))\n r.raise_for_status()\n logging.debug(\"Branch Protection enabled for branch {}\".format(rc))\n\n # Ensure the rc branch was not updated to anything else while it was\n # unprotected. 
Stored mainline_sha is used incase mainline has\n # moved on since the SHA was acquired.\n assert mainline_sha == repo.branch(rc).commit.sha\n logging.debug(\"rc branch update complete\")", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def sync_from_upstream(self):\n if not self.missing_branches:\n self.log(f\"All branches are synced, nothing to do here.\")\n return\n\n with tempfile.TemporaryDirectory() as tmpdir:\n src_path = Path(tmpdir) / self.deb_model.src\n self.deb_model.base.clone(cwd=tmpdir)\n for branch in self.missing_branches:\n self.log(f\"Processing branch {branch}\")\n self.deb_model.base.checkout(branch, new_branch=True, cwd=str(src_path))\n\n changelog_fn = src_path / \"debian/changelog\"\n changelog_fn_tpl = src_path / \"debian/changelog.in\"\n\n k8s_major_minor = semver.VersionInfo.parse(branch.lstrip(\"v\"))\n\n changelog_context = {\n \"deb_version\": f\"{str(k8s_major_minor)}-0\",\n }\n\n self.log(f\"Writing template vars {changelog_context}\")\n changelog_out = changelog_fn_tpl.read_text()\n changelog_out = self.render(changelog_fn_tpl, changelog_context)\n changelog_fn.write_text(changelog_out)\n\n self.log(f\"Committing {branch}\")\n self.deb_model.base.add([str(changelog_fn)], cwd=str(src_path))\n self.deb_model.base.commit(\n f\"Creating branch {branch}\", cwd=str(src_path)\n )\n self.deb_model.base.push(ref=branch, cwd=str(src_path))", "def pull(explicit=False):\n repo = git.repo()\n check_detached_head()\n saved_current_branch = repo.current_branch()\n\n commit()\n remote = remote_branch() \n\n # fetch. Dont use pull because we anyway have to local branches two deal\n # with: free and nice\n repo.fetch()\n\n # merge (updated) remote branch into free branch\n free = free_branch() \n if free:\n repo.checkout(free)\n repo.merge(remote)\n\n # rebase nice branch onto (updated) remote branch\n # todo: what if the above pull fails? 
Then the nice_branch is not rebased which leads to troubles later\n # todo: should be done automatically within pull if nice-branch is setuped correctly\n nice = nice_branch() \n if nice:\n repo.checkout(nice)\n repo.rebase(remote)\n\n if explicit:\n repo.checkout(saved_current_branch)", "def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)", "def update_from_repo():\n\treturn", "def change_repo_base(self, repo_base):\n self.user_con.change_repo_base(repo_base=repo_base)", "def update():\n call('git -C ~/norminette+ pull', shell=True)", "def update_branch(self, name, sha):\n branch_info = {\n 'sha': sha,\n }\n resp = self.patch('git/refs/heads/{}'.format(name), json=branch_info)\n\n try:\n resp.raise_for_status()\n except Exception:\n logger.error(resp.json())\n raise\n\n return resp.json()", "def update(context, user=get_local_user(), remote=False, instance=None, branch=BRANCH):\n no_stack = None\n no_compose = False\n\n command = f\"git checkout {branch} || git pull && git checkout {branch}\"\n run_command(context, user, remote, instance, no_stack, command, no_compose)\n\n command = \"git pull\"\n run_command(context, user, remote, instance, no_stack, command, no_compose)", "def __gitSubmodulesUpdate(self):\n self.vcs.gitSubmoduleUpdate(self.project.getProjectPath())", "def set_branch(self, value):\n self.update(value)", "def merge_with(self, topic):\n with transaction.atomic():\n if self == topic:\n return self\n if (\n self.branched_from\n and topic.branched_from\n and self.branched_from != topic.branched_from\n ):\n raise ValueError(\"Cannot merge topics with different branched_from topics.\")\n if self.most_recent.semester >= topic.most_recent.semester:\n 
Course.objects.filter(topic=topic).update(topic=self)\n if topic.branched_from and not self.branched_from:\n self.branched_from = topic.branched_from\n self.save()\n topic.delete()\n return self\n else:\n Course.objects.filter(topic=self).update(topic=topic)\n if self.branched_from and not topic.branched_from:\n topic.branched_from = self.branched_from\n topic.save()\n self.delete()\n return topic" ]
[ "0.85658085", "0.6386416", "0.6250974", "0.6157709", "0.607403", "0.603884", "0.60378253", "0.5953686", "0.5821291", "0.57876724", "0.5657849", "0.56300837", "0.5626506", "0.54639095", "0.5381909", "0.53807336", "0.5374803", "0.53547436", "0.5313713", "0.5183402", "0.5182366", "0.51800334", "0.5077148", "0.50586385", "0.5045197", "0.50376165", "0.5031152", "0.501421", "0.49934703", "0.4983102" ]
0.79265606
1
Create topic branch locally and remotely.
def create_topic_branch(self, topic_branch_name):
    print("Creating topic branch locally...")
    self.git.checkout(self.base_branch)
    self.git.checkout('-b', topic_branch_name)
    print("Pushing topic branch to base branch's remote...")
    self.git.push('-u', self.base_branch_remote(), topic_branch_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")", "def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)", "def check_out_topic_branch_from_remote(self):\n self.git.checkout('-b', self.topic_branch, '{}/{}'.format(self.base_branch_remote(), self.topic_branch))", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def __init__(self, base_branch, topic_branch=None, delete_local=False):\n super(TopicMerge, self).__init__(base_branch)\n\n self.topic_branch = topic_branch\n self.delete_local = delete_local\n\n if not topic_branch:\n self.topic_branch = self.active_branch()\n print(\"Using active branch '{}' for topic branch.\".format(self.topic_branch))\n\n if self.topic_branch == self.base_branch:\n raise Exception(\"Topic branch and base branch shouldn't be the same.\")", "def create_topic (self):\n return self.tm.create_topic()", "def prepare_deploy(ticket=None, msg=None, branch=None):\n test()\n commit(ticket, msg)\n push(branch)\n pull(branch)", "def create_topic(project_id, topic_id):\n topic_path = PUBLISHER_CLIENT.topic_path(project_id, topic_id)\n topic = PUBLISHER_CLIENT.create_topic(request={\"name\": topic_path})\n print(\"Created topic: {}\".format(topic.name))", "def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException as _:\n log.xmark()\n raise", "def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name", "def test_heads_create_new_branch_at_another_branch(repository: Repository) -> None:\n main = repository.head\n branch1 = repository.heads.create(\"branch1\")\n\n repository.checkout(branch1)\n repository.commit()\n\n repository.checkout(main)\n branch2 = repository.heads.create(\"branch2\", branch1.commit)\n\n assert branch1.commit == branch2.commit", "def rebase_topic_branch_and_push(self):\n # Rebase topic branch\n print('Checking out topic branch..')\n self.git.checkout(self.topic_branch)\n print('Updating topic branch with work from base branch...')\n self.git.rebase(self.base_branch)\n\n # Push rebased version (so it'll get marked as merged later if on\n # Github)\n print('Pushing updated topic branch...')\n self.git.push('--force')", "def create_branch_with_patch(self,\n branch_name,\n message,\n patch,\n author,\n force_push=False):\n self.clean()\n\n try:\n # This won't be exercised in production because wpt-exporter\n # always runs on a clean 
machine. But it's useful when running\n # locally since branches stick around.\n _log.info('Deleting old branch %s', branch_name)\n self.run(['git', 'branch', '-D', branch_name])\n except ScriptError:\n # This might mean the branch wasn't found. Ignore this error.\n pass\n\n _log.info('Creating local branch %s', branch_name)\n self.run(['git', 'checkout', '-b', branch_name])\n\n # Remove Chromium WPT directory prefix.\n patch = patch.replace(RELATIVE_WPT_TESTS, '')\n\n _log.info('Author: %s', author)\n if '<' in author:\n author_str = author\n else:\n author_str = '%s <%s>' % (author, author)\n\n # TODO(jeffcarp): Use git am -p<n> where n is len(RELATIVE_WPT_TESTS.split(/'))\n # or something not off-by-one.\n self.run(['git', 'apply', '-'], input=patch)\n self.run(['git', 'add', '.'])\n self.run(['git', 'commit', '--author', author_str, '-am', message])\n\n # Force push is necessary when updating a PR with a new patch\n # from Gerrit.\n if force_push:\n self.run(['git', 'push', '-f', 'origin', branch_name])\n else:\n self.run(['git', 'push', 'origin', branch_name])", "def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n )\n ))", "def test_heads_create_new_branch_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert repository.head.commit == branch.commit", "def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)", "def _create_topic(self):\n topic_name = self.generate_name()\n try:\n topic = self.sns.create_topic(Name=topic_name)\n except Exception as e:\n raise RuntimeError('SNS could create topic: %s' % e)\n self.topic_name, self.topic = topic_name, topic", "def create_topic():\n nodes = Node.query.all()\n form = TopicForm(nodes)\n if request.method == 'POST':\n topic = Topic(title=request.form.get('title'),\n content=request.form.get('content'),\n node_id=request.form.get('node_id'),\n user=current_user._get_current_object())\n db.session.add(topic)\n db.session.commit()\n return jsonify({\"result\": 'ok'})\n\n return render_template('main/create_topic.html', nodes=nodes, form=form)", "def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))", "def start(self, remote, branch, depth, tracking):\n\n if branch not in self.repo.heads:\n if not is_offline():\n return_code = self.fetch(remote, ref=branch, depth=depth)\n if return_code != 0:\n sys.exit(1)\n return_code = self._create_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n return_code = self._checkout_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n else:\n branch_output = fmt.ref_string(branch)\n print(' - ' + branch_output + ' already exists')\n correct_branch = self._is_branch_checked_out(branch)\n if correct_branch:\n print(' - On correct branch')\n else:\n return_code = self._checkout_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n if tracking and not is_offline():\n self._create_branch_remote_tracking(branch, remote, depth)", "def repository_create_hosted():\n pass", "def branch(self, name, ref=\"HEAD\"):\n self._git.create_head(name, ref)\n self.checkout(name)", "def CreateTopic(self, TopicId, TopicStrings=None):\n if len(TopicStrings) >= 2:\n 
ticker, field = TopicStrings\n logging.info(f\"CreateTopic {TopicId}, {ticker}|{field}\")\n if not ticker:\n return None\n\n if ticker == \"set_token\":\n self.finnhub_token = field\n self.start_conn_event.set()\n\n new_topic = SimpeVarTopic(TopicId, TopicStrings)\n self.topics_by_key[(ticker)] = field\n self.updatedTopics[TopicId] = \"Finnhub token set\"\n else:\n new_topic = StockTickTopic(TopicId, TopicStrings)\n ticker = ticker.upper()\n self.topics_by_key[(ticker, field)] = new_topic\n subscribe_msg = f\"{{\\\"type\\\":\\\"subscribe\\\",\\\"symbol\\\":\\\"{ticker}\\\"}}\"\n logging.debug(subscribe_msg)\n try:\n self.async_loop.call_soon_threadsafe(lambda: self.send_message_queue.put_nowait(subscribe_msg))\n except Exception as e:\n logging.error(\"CreateTopic: {}\".format(repr(e)))\n else:\n logging.error(f\"Unknown param: CreateTopic {TopicId}, {TopicStrings}\")\n return None\n return new_topic", "def create_pubsub_topic(client, project, name):\n full_name = pubsub.topic_name(project, name)\n if client.get_topic(full_name):\n return\n\n client.create_topic(full_name)", "def create(cls, topic):\n\t\treturn cls(key_name=utils.get_hash_key_name(topic), topic=topic)", "def master():\n env.branch = 'master'", "def master():\n env.branch = 'master'", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)", "def deploy(\n context, instance, user=get_local_user(), initial=False, stack=None, branch=BRANCH,\n):\n remote = True\n\n if initial:\n clone(context, instance, user, branch)\n else:\n backup(context, user, remote, instance, stack)\n\n update(context, user, remote, instance, branch)\n up(context, user, remote, instance, stack)", "def test_heads_create_existing_branch_force(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n branch = heads.create(\"branch\", head.commit)\n updatefile(repository.path / \"a\")\n heads.create(branch.name, head.commit, force=True)\n assert head.commit == branch.commit" ]
[ "0.6686283", "0.6487901", "0.6256341", "0.6225223", "0.6147443", "0.609922", "0.6061899", "0.60441536", "0.5970918", "0.591403", "0.59020525", "0.5832282", "0.5827752", "0.58189726", "0.5775379", "0.5697713", "0.5670031", "0.5617806", "0.5587961", "0.5563945", "0.55238813", "0.5511258", "0.54855514", "0.5469849", "0.5398731", "0.5382484", "0.5382484", "0.53782535", "0.537811", "0.5349677" ]
0.8445331
0
Return name of active branch.
def active_branch(self): return self.repo.active_branch.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_branch_name(self):\n # type: () -> Optional[str]\n branch = self.get_current_branch()\n if branch:\n return branch.name\n return None", "def branch_name(self):\n return f'phab-diff-{self.diff_id}'", "def branch(self):\n return os.popen('git rev-parse --abbrev-ref HEAD').read().strip()", "def current_branch():\n return subprocess.check_output('git branch --show-current'.split()).decode().strip()", "def get_current_branch(path_to_repository):\n repo = Repo(path_to_repository)\n return repo.active_branch.name", "def branch(self) -> Optional[str]:\n return pulumi.get(self, \"branch\")", "def get_active_name(self):\n return self.get_name()", "def getBranchName(directory):\n return subprocess.check_output([\"git\",\"rev-parse\",\"--abbrev-ref\",\"HEAD\"],cwd=directory).strip()", "def get_branch():\n command = [\"git\", \"branch\", \"--show-current\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n branch_str = proc.stdout.readline()\n return branch_str.decode(\"utf-8\").rstrip()", "def get_branch():\n if os.getenv('GIT_BRANCH'):\n # Travis\n branch = os.getenv('GIT_BRANCH')\n elif os.getenv('BRANCH_NAME'):\n # Jenkins 2\n branch = os.getenv('BRANCH_NAME')\n else:\n branch = check_output(\n \"git rev-parse --abbrev-ref HEAD\".split(\" \")\n ).decode('utf-8').strip()\n\n return branch.replace(\"/\", \"_\")", "def get_branch(self):\n if self._repository:\n return self._repository.dirstate.branch()", "def branch(self):\n return self._changeset.get('branch', None)", "def default_branch(self) -> str:\n return pulumi.get(self, \"default_branch\")", "def topology_read_branch_name(self):\n result = ctypes.c_char_p(self.dss_obj.TopologyS(ctypes.c_int32(0), ctypes.c_int32(0)))\n return result.value.decode('ascii')", "def branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"branch\")", "def branch(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"branch\")", "def branch_nick(self):\n return self._branch_nick", "def get_current_remote_name(self) -> str:\n match = self.status()\n\n if match.branch_upstream is None: # no upstream set\n if match.branch_head is None:\n raise Exception(\"No branch found for git repository\")\n return match.branch_head\n if match.branch_head is None:\n return match.branch_upstream\n\n return match.branch_upstream.replace(\"/\" + match.branch_head, \"\")", "def get_git_branch():\n branch = \"\"\n try:\n # git > 2.22 could do 'git branch --show-current'\n branch = check_output(\n ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n\n # No git installed or project downloaded as a .zip\n except Exception:\n pass\n\n return branch.strip()", "def _branch(self):\n printer = Printer(None)\n ci_manager = CIManager(printer)\n return ci_manager.get_branch()", "def git_branch():\n result, output = popen('git branch', False, False)\n branch = None\n for line in output:\n if line.startswith('*'):\n branch = line.split('*')[-1].strip()\n break\n return branch", "def short_branch_name(branch):\n return branch.replace('refs/heads/', '')", "def get_current_branch(self, current_path):\n command = [\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get current branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )", 
"def get_branch(project_root: str) -> str:\n if os.path.isfile(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION'):\n with open(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION') as f:\n return f.read().replace('\\n', '')\n\n child = subprocess.Popen('cd {0} && git rev-parse --abbrev-ref HEAD'.format(project_root),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)\n exit_code = child.wait()\n branch = child.stdout.read().decode()\n if len(branch) != 0:\n branch = branch.replace('\\n', '')\n else:\n return 'unknown'\n if exit_code == 0 and branch != 'HEAD':\n return branch\n else:\n return 'unknown'", "def active_branch(self) -> Head:\n # reveal_type(self.head.reference) # => Reference\n return self.head.reference", "def get_branch_names(self):\n return [\n branch.name for branch in self.repo.branches\n ]", "def get_current_branch(directory=None):\n cmd = 'git branch --no-color'\n output = check_output(cmd, shell=True, cwd=directory)\n output = output.splitlines()\n for token in output:\n if token.strip().startswith('*'):\n token = token[2:]\n if token == '(no branch)':\n return None\n return token\n return None", "def __str__(self):\n return self.branch_id", "def __gitShowBranch(self):\n self.vcs.gitShowBranch(self.project.getProjectPath())", "def branch(self, name: str) -> GitRef:\n _args = [\n Arg(\"name\", name),\n ]\n _ctx = self._select(\"branch\", _args)\n return GitRef(_ctx)" ]
[ "0.8120284", "0.7759571", "0.77387923", "0.76723415", "0.7610565", "0.75223535", "0.7266739", "0.7229904", "0.71897435", "0.7162496", "0.7041211", "0.7009984", "0.6989085", "0.69307643", "0.6906568", "0.6906568", "0.6826007", "0.6765414", "0.672135", "0.67150354", "0.66725886", "0.6556776", "0.65516776", "0.65428096", "0.65425026", "0.6535546", "0.6526369", "0.6514233", "0.64856833", "0.64800555" ]
0.87727827
0
Check whether a branch exists locally in the current repository.
def local_branch_exists(self, branch): return branch in self.repo.branches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def branch_exists(repo, branch, remote=False):\n ref = 'refs/remotes/origin/' + branch if remote else 'refs/heads/' + branch\n return subprocess.call(['git', 'show-ref', '-q', '--verify', ref],\n cwd=repo) == 0", "def branch_exists(branch):\n\n try:\n git('show-ref', branch)\n return True\n except subprocess.CalledProcessError:\n return False", "def remote_branch_exists(self, branch):\n try:\n self.git.show_ref(\"refs/remotes/{}/{}\".format(self.base_branch_remote(), branch))\n return True\n except git.exc.GitCommandError:\n return False", "def branch_exists(branch_name, local_only=False, directory=None):\n for branch in get_branches(local_only, directory):\n if branch.startswith('remotes/'):\n branch = branch.split('/')\n if len(branch) > 2:\n branch = '/'.join(branch[2:])\n if branch_name == branch:\n return True\n else:\n if branch_name == branch:\n return True\n return False", "def git_repo_branch_exists(repo: str, branch: str) -> bool:\n get_git_version()\n cmd = f\"git ls-remote {repo} {branch}\"\n # We might be tempted to use `--exit-code` with `git ls-remote`, but\n # `run_command` handles the `returncode` for us, so we'll rely on\n # the fact that stdout returns '' if the requested branch doesn't exist\n ret = run_command(cmd, capture=True)\n exists = ret.stdout != \"\"\n return exists", "def has_branch(self, branch):\n if self.branch == branch:\n return True\n return False", "def branch_exists(nametag, branches):\n for branch in branches:\n if branches[branch].name == nametag:\n return True\n return False", "def is_branch(wit_path, branch):\n\n branches = _get_references_data(wit_path)\n del branches['HEAD']\n return branch in branches.keys()", "def check_branch(subcommand, branch):\n if subcommand != \"checkout\":\n return\n # first make sure actual branch name was given\n if branch is None:\n return \"Branch name to checkout must be supplied with '-b' option\"\n # next check that the local repo is clean\n cmd = [\"git\", \"status\", \"--untracked-files=no\", \"--porcelain\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, universal_newlines=True)\n if p.stdout.strip():\n return \"Need to have clean working tree to checkout!\\n\\n\" + p.stdout\n # next check that the branch name doesn't already exist\n cmd = [\"git\", \"show-ref\", \"--verify\", \"--quiet\", \"refs/heads/\" + branch]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if not p.returncode:\n return f\"Branch {branch!r} already exists\"", "def _is_branch(self, reference_name):\n return reference_name.startswith(\"refs/heads/\") or reference_name.startswith(\n \"refs/remotes/\"\n )", "def is_remote_reserve_branch_present(repo):\n reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return reserve_name.fq in remote_ref_names", "def master_branch(branch_name):\n\n if branch_name in MASTER_BRANCHES:\n return True\n\n return False", "def _is_current_branch(self, branch_name, current_branch_name):\n return branch_name == current_branch_name", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def is_master_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through 
each line of the command output.\n\n if branch.startswith(\"*\") and \"master\" in branch:\n # The current branch is `master`.\n\n # We return True.\n return True\n\n # The current branch is not `master`.\n\n # We return False.\n return False", "def exists(self):\n return self._repository is not None", "def ensure_remote_branch_is_tracked(branch):\n if branch == MASTER_BRANCH:\n # We don't need to explicitly track the master branch, so we're done.\n return\n\n # Ensure the specified branch is in the local branch list.\n output = subprocess.check_output(['git', 'branch', '--list'])\n for line in output.split('\\n'):\n if line.strip() == branch:\n # We are already tracking the remote branch\n break\n else:\n # We are not tracking the remote branch, so track it.\n try:\n sys.stdout.write(subprocess.check_output(\n ['git', 'checkout', '--track', 'origin/%s' % branch]))\n except subprocess.CalledProcessError:\n # Bail gracefully.\n raise SystemExit(1)", "def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])", "def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": 
p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }", "def _check_branch(opt, params):\n\n # Check the current branch and hash\n _get_branch(opt)\n\n if params.git_branch != opt.git_branch or params.git_hash != opt.git_hash:\n msg = 'You are not on the right branch or commit. Please run the following in the repository: \\n'\n msg += f'git checkout {params.git_branch}\\n'\n msg += f'git revert {params.git_hash}'\n sys.exit(msg)", "def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False", "def exists(env):\n return targz.exists(env)", "def check_for_branch_op(op_info: ModuleIdentifierOpInfo):\n\n op = conn_graph.get_all_ops()[op_info.module_name]\n return_bool = True\n product = op.output\n if \"branch\" not in product.name:\n logger.error(\"branch not in product name\")\n return_bool = False\n if len(product.consumers) > 1:\n logger.error(\"branch op is not parent op's only consumer\")\n return_bool = False\n branch_op = product.consumers[0]\n if branch_op.type != \"branch\":\n logger.error(\"parent op's child op is not of type branch\")\n return_bool = False\n branch_product = branch_op.output\n if \"multiple_ops\" not in branch_product.name:\n logger.error(\"multiple_ops not in branch op's product's name\")\n return_bool = False\n if len(branch_product.consumers) <= 1:\n logger.error(\"branch op's product has one or fewer consumers\")\n return_bool = False\n for consumer in branch_product.consumers:\n for input_product in consumer.inputs:\n if input_product.producer == op:\n logger.error(\"parent op is still one of child op's inputs (as opposed to branch op)\")\n return_bool = False\n return return_bool", "def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))", "def exists(repo_path):\n\n if not ProjectRepo.existing_git_repository(repo_path):\n cprint(' - Project is missing', 'red')", "def check_branch_in_manifest(manifest_filename, meta):\n print \"Checking manifest {}\".format(manifest_filename)\n manifest_et = ET.parse(os.path.join(\"./manifest\", manifest_filename))\n project_et = manifest_et.find(\"./project[@name='{}']\".format(PROJECT))\n if project_et is None:\n print \"project {} not found\".format(PROJECT)\n return False\n\n # Compute the default branch for the manifest\n default_branch = \"master\"\n default_et = manifest_et.find(\"./default\")\n if default_et is not None:\n default_branch = default_et.get(\"branch\", \"master\")\n\n # Pull out the branch for the given project\n project_branch = project_et.get(\"revision\", default_branch)\n if project_branch != BRANCH:\n print \"project {} on branch {}, not {}\".format(PROJECT, project_branch, 
BRANCH)\n return False\n\n return True", "def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0", "def git_checkout_branch(name):\n\n if subprocess.call([\"git\", \"diff\", \"--quiet\", \"HEAD\"]) != 0:\n raise Exception(\"Dirty working tree; not checking out %s\" % name)\n\n if subprocess.call([\"git\", \"checkout\", name]) != 0:\n raise Exception(\"Could not checkout %s\" % name)", "def exists_ref(self, commit_id):\n pass", "def is_legacy_landinglog_branch_present(repo):\n legacy_landinglog_name = phlgitu_ref.Name(_LEGACY_LANDINGLOG_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return legacy_landinglog_name.fq in remote_ref_names" ]
[ "0.83275986", "0.8276479", "0.81830424", "0.8062521", "0.79845", "0.7687554", "0.7388475", "0.7297498", "0.69419837", "0.68392557", "0.6772355", "0.6571662", "0.6411818", "0.6379329", "0.6296787", "0.6248183", "0.6241988", "0.5957182", "0.5877177", "0.58717155", "0.58352304", "0.5783776", "0.5754732", "0.56719077", "0.5669187", "0.56395924", "0.5619853", "0.5616084", "0.56078416", "0.56077844" ]
0.86900705
0
Return remote of base branch.
def base_branch_remote(self):
    return self.git.config('--get', 'branch.{}.remote'.format(self.base_branch))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_gitlab_remote(self):\n return self.get_remote('gitlab')", "def get_git_upstream_remote():\n cmd = \"git remote get-url upstream\"\n if run_cmd(cmd):\n return \"upstream\"\n else:\n return \"origin\"", "def git_remote_url(self):\n return self._git_remote_url", "def git_remote(uri):\n remotes = git(['remote', '-v']).split('\\n')\n pattern = re.compile(r'(?iu)^(?P<name>[^ ]+)[\\t]+bzr::(?P<remote>[^ ]+)')\n for remote in remotes:\n log.debug('check remote: %s', remote)\n matches = pattern.match(remote)\n if matches and matches.group('remote') == uri:\n return matches.groups()\n return None, None", "def origin(self):\n for item in os.popen('git remote -v'):\n split_item = item.strip().split()\n if split_item[0] == 'origin' and split_item[-1] == '(push)':\n return split_item[1]", "def get_current_remote_name(self) -> str:\n match = self.status()\n\n if match.branch_upstream is None: # no upstream set\n if match.branch_head is None:\n raise Exception(\"No branch found for git repository\")\n return match.branch_head\n if match.branch_head is None:\n return match.branch_upstream\n\n return match.branch_upstream.replace(\"/\" + match.branch_head, \"\")", "def get_remote(repo, name='origin'):\n config_name = 'remote.{}.url'.format(name)\n return subprocess.check_output(['git', 'config', '--get',\n config_name], cwd=repo).rstrip()", "def git_remote(**kw):\n return sh('git', 'remote', **kw).strip().split('\\n')", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def remote(self, name: str, **kwargs: Any) -> Optional[GitRemote]:\n\n try:\n ret = self.cmd.remote.show(\n name=name, no_query_remotes=True, log_in_real_time=True\n )\n lines = ret.split(\"\\n\")\n remote_fetch_url = lines[1].replace(\"Fetch URL: \", \"\").strip()\n remote_push_url = lines[2].replace(\"Push URL: \", \"\").strip()\n if remote_fetch_url != name and remote_push_url != name:\n return GitRemote(\n name=name, fetch_url=remote_fetch_url, push_url=remote_push_url\n )\n else:\n return None\n except exc.LibVCSException:\n return None", "def get_remote(self, name):\n repo = Repo('.')\n if not hasattr(repo, 'remotes'):\n raise NotFound()\n for remote in repo.remotes:\n if remote.name == name:\n return remote\n raise NotFound()", "def local_diff_branch():\n # Only allow specified remote and branch in local dev.\n remote = os.getenv(LOCAL_REMOTE_ENV)\n branch = os.getenv(LOCAL_BRANCH_ENV)\n if remote is not None and branch is not None:\n return '%s/%s' % (remote, branch)", "def _get_rebasebranch(self):\n logging.info('--- Get Rebasebranch ---')\n local_branch_candidates = {\n branch for branch in self.local_branches\n if branch == self.options.rebasebranch}\n remote_branch_candidates = {\n branch for branch in self.remote_branches\n if self.options.rebasebranch in branch}\n try:\n found_local_branch = local_branch_candidates.pop()\n except KeyError:\n gitwrapper.exit_with_error(\n 'No local branches named %r found.',\n self.options.rebasebranch)\n #\n if local_branch_candidates:\n gitwrapper.exit_with_error(\n 'Too many matching local branches found: %s, %s.',\n found_local_branch,\n ', '.join(local_branch_candidates))\n #\n if not remote_branch_candidates:\n gitwrapper.exit_with_error(\n 'No remote branches named %r found.',\n self.options.rebasebranch)\n #\n if len(remote_branch_candidates) > 2:\n # 1 if remote is not pushed, 2 if its pushed to remote\n gitwrapper.exit_with_error(\n 'Too many matching remote branches found: %s.',\n ', '.join(remote_branch_candidates))\n #\n self.local_branches = 
{found_local_branch}\n self.remote_branches = remote_branch_candidates\n logging.info('Found local branch %r.', found_local_branch)\n logging.info(\n 'Found remote branches %s.'\n ' and '.join(repr(branch) for branch in self.remote_branches))\n # We only rebase the specified branch\n self.tags = set()", "def svn_branch():\n return svn_url().split('/')[-1]", "def getBranch(self, repo=None):\n repos_list_url = \"/\".join([self.url, \"repos\", repo, \"branches\"])\n print repos_list_url\n request = urllib2.Request(repos_list_url, headers=self.headers)\n response = urllib2.urlopen(request)\n\n return response.read()", "def __fetch_remote_source(self):\n # type: () -> Union(Git, None)\n if self.source == 'git':\n return self.git_source_class(**self.configuration).fetch()\n return None", "def fetchref(self, ref):\n log.debug('[%s] Fetching ref: %s', self.name, ref)\n fetch_info = self.repo.remotes.origin.fetch(ref).pop()\n return fetch_info.ref", "def get_remote(repo: git.Repo, name: str) -> git.remote.Remote:\n for remote in repo.remotes:\n if remote.name == name:\n return remote\n return None", "def get_repo_branch(self):\n # Load HEAD and find ref.\n with open('{path}HEAD'.format(path=self.workpath), 'rb') as fp:\n ref = fp.read().strip().decode().split(': ')[1]\n\n print('[+] Downloading {}'.format(ref))\n\n # Requests for head hash and save\n head_url = '{base_url}{ref}'.format(base_url=self.base_url, ref=ref)\n data = self._request(head_url).read().strip()\n\n # Save the hash inside the ref file into the target place.\n ref_path = '/'.join(ref.split('/')[:-1])\n if not os.path.exists('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path)):\n os.makedirs('{path}{ref_path}'.format(path=self.workpath, ref_path=ref_path))\n with open('{path}{ref}'.format(path=self.workpath, ref=ref), 'wb') as fp:\n fp.write(data)\n\n # After get ref->head_hash, why not share it.\n self.head_hash = data.decode()", "def remote_origin_url(self):\n if self._remote_origin_url:\n return self._remote_origin_url\n\n topleveldata = self.git(\"config\", \"--get\", \"remote.origin.url\")\n self._remote_origin_url = topleveldata[0]\n return self._remote_origin_url", "def pull(self, verbose=True):\n fetch_cmd = [\"git\", \"fetch\"]\n if not verbose:\n fetch_cmd.append(\"-q\")\n subprocess.call(fetch_cmd, cwd=self.path)\n checkout_cmd = [\"git\", \"checkout\", \"origin/master\", \"-B\", \"master\"]\n if not verbose:\n checkout_cmd.append(\"-q\")\n return subprocess.call(checkout_cmd, cwd=self.path)", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not 
is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }", "def get_branch():\n command = [\"git\", \"branch\", \"--show-current\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n branch_str = proc.stdout.readline()\n return branch_str.decode(\"utf-8\").rstrip()", "def test_remote(self):\n\n self.assertEqual(description.RepositoryDescription(\n '[email protected]:/example/remote', '/path/to/local').remote,\n implementation.RemoteRepository(\n '[email protected]:/example/remote'))", "def _remote_path(self):\n return self._remote_dir", "def remote(self, *args, **kwargs):\n return self.api.remote(*args, **kwargs)", "def test_pull_default_remote(self, repo):\n dest = os.path.join(self._tmpdir, 'cloned_repo')\n clone(['arg0', repo.path, dest])\n cloned = ComponentTestGitRepository(dest)\n self._check_repo_state(cloned, 'master', ['master'])\n eq_(pull(['argv0']), 0)\n assert len(repo.get_commits()) == 1", "def getRemoteHost():", "def cmd_get_diffbase(review_target, branch):\n return ['git', 'merge-base', review_target, branch]" ]
[ "0.68245757", "0.67746514", "0.674733", "0.6530229", "0.6530024", "0.6505973", "0.64553195", "0.64203966", "0.64174384", "0.63947403", "0.6328345", "0.6312695", "0.63110465", "0.6287203", "0.62429124", "0.6184216", "0.61142975", "0.6098868", "0.6066217", "0.60564506", "0.60373026", "0.6036886", "0.60207564", "0.59682846", "0.5920075", "0.59189045", "0.58996046", "0.5862666", "0.5827293", "0.57973045" ]
0.87765974
0
Create topic branch merge helper instance.
def __init__(self, base_branch, topic_branch=None, delete_local=False):
    super(TopicMerge, self).__init__(base_branch)

    self.topic_branch = topic_branch
    self.delete_local = delete_local

    if not topic_branch:
        self.topic_branch = self.active_branch()
        print("Using active branch '{}' for topic branch.".format(self.topic_branch))

    if self.topic_branch == self.base_branch:
        raise Exception("Topic branch and base branch shouldn't be the same.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)", "def merge_and_cleanup(self):\n print('Checking out base branch and merging topic branch...')\n self.git.checkout(self.base_branch)\n self.git.merge('--ff-only', self.topic_branch)\n\n # Push merge and delete topic branch\n print('Pushing base branch with topic branch merged...')\n self.git.push()\n print('Deleting remote topic branch...')\n self.git.push('origin', ':{}'.format(self.topic_branch))\n\n # Optionally delete local topic branch\n if self.delete_local:\n print('Deleting local topic branch...')\n self.git.branch('-D', self.topic_branch)", "def create_branch(self):\n os.chdir(str(self.repository_path))\n sh.git.checkout('master')\n sh.git.checkout('-b', self.branch)\n logger.debug('Branch {} created', self.branch)", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")", "def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n )\n ))", "def merge_feature(repo, cfg, model, date):\n repo.index.merge_tree(\n repo.heads.master, base=repo.merge_base(repo.heads.master, repo.head)\n )\n kwargs = {\n **author_committer_facts(model, date),\n **dict(head=True, parent_commits=(repo.heads.master.commit, repo.head.commit)),\n }\n repo.index.commit(message_of(cfg, model.ticket, \"merge_commit_words\"), **kwargs)\n repo.heads.master.commit = repo.head.commit\n repo.head.reference = repo.heads.master\n repo.delete_head(model.ticket)\n model = groom_model(model)\n\n return repo, model", "def create_branches(branches, pcoll, provider_options):\n\n logger.info('Branch count: %i' % len(branches))\n pcoll_tuple = ()\n\n for branch in branches:\n logger.info('Adding branch')\n output = create_graph(branch, pcoll, provider_options)\n pcoll_tuple = pcoll_tuple + (output,)\n\n logger.info('Transform: MergeBranches')\n output = pcoll_tuple | 'MergeBranches' >> MergeBranches()\n return output", "def create_branch(ctx, name, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating branch...', break_line=False)\n branch = gh.create_branch(name=name, sha=sha)\n log.checkmark()\n log.echo('Branch {} created at {}'.format(name, sha))\n return branch\n except BaseException as _:\n log.xmark()\n raise", "def test_heads_create_new_branch_at_another_branch(repository: 
Repository) -> None:\n main = repository.head\n branch1 = repository.heads.create(\"branch1\")\n\n repository.checkout(branch1)\n repository.commit()\n\n repository.checkout(main)\n branch2 = repository.heads.create(\"branch2\", branch1.commit)\n\n assert branch1.commit == branch2.commit", "def rebase_topic_branch_and_push(self):\n # Rebase topic branch\n print('Checking out topic branch..')\n self.git.checkout(self.topic_branch)\n print('Updating topic branch with work from base branch...')\n self.git.rebase(self.base_branch)\n\n # Push rebased version (so it'll get marked as merged later if on\n # Github)\n print('Pushing updated topic branch...')\n self.git.push('--force')", "def merge(self, branch):\n\n if branch.username != self.username or branch.reponame != self.reponame:\n raise BranchError(\"Branch to merge must be in the same repository\")\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n LOG.debug(\"Merging from %r to %r\" % (branch, self))\n self._client.postjson(path=\"/users/%(username)s/repos/%(reponame)s/\"\n \"branches/%(name)s/merge\" % context,\n payload={\"from_branch\": branch.name})", "def merge_with(self, topic):\n with transaction.atomic():\n if self == topic:\n return self\n if (\n self.branched_from\n and topic.branched_from\n and self.branched_from != topic.branched_from\n ):\n raise ValueError(\"Cannot merge topics with different branched_from topics.\")\n if self.most_recent.semester >= topic.most_recent.semester:\n Course.objects.filter(topic=topic).update(topic=self)\n if topic.branched_from and not self.branched_from:\n self.branched_from = topic.branched_from\n self.save()\n topic.delete()\n return self\n else:\n Course.objects.filter(topic=self).update(topic=topic)\n if self.branched_from and not topic.branched_from:\n topic.branched_from = self.branched_from\n topic.save()\n self.delete()\n return topic", "def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name", "def __branch_factory(self, action, task_activities):\n branches = action.findall(\"./branches_Branch\")\n for branch in branches:\n branch_type = get_branch_type(branch=branch)\n if \"probabilistic\" == branch_type:\n return self.__add_probabilistic_branch(action=action, task_activities=self.task_activities)\n elif \"type\" == branch_type:\n return self.__add_type_branch(action=action, task_activities=task_activities)\n elif \"detailed\" == branch_type:\n return self.__add_detailed_branch(action=action, task_activities=task_activities)\n elif \"simple\" == branch_type:\n return self.__add_simple_branch(action=self.action, task_activities=self.task_activities)\n else:\n raise ValueError(\"Unknown branch_type. 
Abort Mission.\")", "def _assign_branches(ctx, prl):\n heads = prl.set_heads\n if not heads:\n return None\n branch_dict = ctx.branch_dict()\n LOG.debug2('allowing branch creation: %s', ctx.branch_creation)\n # Assign branches to each of the received commits for pushed branches\n assigner = Assigner(branch_dict, heads, ctx)\n assigner.assign()\n return assigner", "def test_heads_create_new_branch_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert repository.head.commit == branch.commit", "def merge(): #Status: WIP\r\n pass", "def test_topic_merge_other_forum(topic_normal):\n forum_other = Forum(title=\"Test Forum 2\", category_id=1)\n forum_other.save()\n\n topic_other = Topic(title=\"Test Topic 2\")\n post_other = Post(content=\"Test Content 2\")\n topic_other.save(user=topic_normal.user, forum=forum_other, post=post_other)\n\n assert not topic_normal.merge(topic_other)", "def _get_branches_to_merge(branch):\n branches = [(branch, branch.subfolder or '')]\n for dependency in branch.branch_dependency_ids:\n branches.append((dependency.merge_with_branch_id, dependency.merge_subfolder or ''))\n return branches[::-1]", "def create_branch(self, name, base_name, from_sha=False):\n\n logger.debug(\n 'GitHubAPI.create_branch: name={}, base_name={}'.format(\n name, base_name\n )\n )\n # raise an error if we can find the branch, continue if we get\n # a 404\n try:\n self.get_branch(name)\n except requests.exceptions.HTTPError:\n pass\n else:\n raise DuplicateBranchError(\n 'Branch already started. Run'\n '\\n\\tgit fetch --all && get checkout {}'.format(name)\n )\n\n if not from_sha:\n base = self.get_branch(base_name)\n base_sha = base['object']['sha']\n else:\n base_sha = base_name\n\n try:\n branch_info = {\n 'ref': 'refs/heads/{}'.format(name),\n 'sha': base_sha\n }\n except KeyError:\n logger.error('base repsonse: {}'.format(base))\n raise Exception(\n 'Could not locate the current SHA for '.format(base_name))\n\n resp = self.post('git/refs', json=branch_info)\n try:\n resp.raise_for_status()\n except Exception:\n logger.error(resp.json())\n raise\n\n return resp.json()", "def createSharedNotebook(self, authenticationToken, sharedNotebook):\r\n pass", "def test_branch_can_be_copied():\n\n setup_org()\n setup_repo()\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/master\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n responses.add(responses.POST, \"https://api.github.com/repos/my-org/my-repo/git/refs\",\n body=my_new_ref,\n content_type='text/json',\n status=201)\n\n responses.add(responses.GET, \"https://api.github.com/repos/my-org/my-repo/branches/main\",\n body=my_repo_branch,\n content_type='text/json',\n status=200)\n\n token = '__dummy__'\n org = \"my-org\"\n client = GithubRestClient(token)\n new_branch_name = \"main\"\n\n repo = get_repository(client, org, \"my-repo\")\n new_branch = copy_branch(repo, repo.default_branch, new_branch_name)\n assert None is not new_branch", "def _createMaster(self, *args, **kwds):\n raise NotImplementedError", "def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 
'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))", "def __init__(self, this_tree, other_branch, other_tree, target_subdir):\n # It is assumed that we are merging a tree that is not in our current\n # ancestry, which means we are using the \"EmptyTree\" as our basis.\n null_ancestor_tree = this_tree.branch.repository.revision_tree(\n revision.NULL_REVISION)\n super(MergeIntoMerger, self).__init__(\n this_branch=this_tree.branch,\n this_tree=this_tree,\n other_tree=other_tree,\n base_tree=null_ancestor_tree,\n )\n self._target_subdir = target_subdir\n self.other_branch = other_branch\n self.other_rev_id = other_tree.get_revision_id()\n self.other_basis = self.other_rev_id\n self.base_is_ancestor = True\n self.backup_files = True\n self.merge_type = merge.Merge3Merger\n self.show_base = False\n self.reprocess = False\n self.interesting_ids = None\n self.merge_type = Wrapper(Merge3MergeIntoMerger,\n target_subdir=self._target_subdir)\n self._finish_init()", "def test_heads_create_new_branch_at_ancestor(repository: Repository) -> None:\n parent = repository.head.commit\n updatefile(repository.path / \"a\")\n branch = repository.heads.create(\"branch\", parent)\n assert parent == branch.commit", "def branch_new(request, repo_id):\n repo = models.Repository.get_by_id(int(repo_id))\n if request.method != 'POST':\n form = BranchForm(initial={'url': repo.url,\n 'category': 'branch',\n })\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n form = BranchForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n branch = models.Branch(\n repo_key=repo.key,\n category=form.cleaned_data.get('category'),\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n branch.repo_name = repo.name\n branch.put()\n return HttpResponseRedirect(reverse(repos))", "def create(title, head, base='master', message=''):\n review_info = {\n 'title': title,\n 'body': message,\n 'head': head,\n 'base': base,\n }\n\n data = json_encode(review_info)\n review = parse(gh_request('POST', '/repos/:user/:repo/pulls', body=data))\n printers.print_review_created(review)", "def mergetree(left, right, working_dir):\r\n # decorate and infer filenames for tips\r\n global INTERNAL_COUNT\r\n\r\n if not isinstance(left, TreeNode):\r\n filepath = str(left[0])\r\n name = basename(filepath.split('.')[0])\r\n left = TreeNode(Name=name)\r\n left.FilePath = filepath\r\n left.Processed = False\r\n left.PollPath = None # doesn't make sense for tips\r\n 
left.FullCommand = None\r\n left.EndTime = None\r\n left.StartTime = None\r\n left.TotalTime = None\r\n\r\n if not isinstance(right, TreeNode):\r\n filepath = str(right[0])\r\n name = basename(filepath.split('.')[0])\r\n right = TreeNode(Name=name)\r\n right.FilePath = filepath\r\n right.Processed = False\r\n right.PollPath = None # doesn't make sense for tips\r\n right.FullCommand = None\r\n right.EndTime = None\r\n right.StartTime = None\r\n right.TotalTime = None\r\n\r\n # internal node\r\n name = str(INTERNAL_COUNT)\r\n filepath = join(working_dir, name) + '.biom'\r\n merged = TreeNode(Name=name, Children=[left, right])\r\n merged.FilePath = filepath\r\n merged.Processed = False\r\n merged.PollPath = filepath + '.poll'\r\n merged.FullCommand = None\r\n merged.EndTime = None\r\n merged.StartTime = None\r\n merged.TotalTime = None\r\n\r\n INTERNAL_COUNT += 1\r\n return merged" ]
[ "0.6248353", "0.5955853", "0.5725158", "0.56283575", "0.5605361", "0.5596414", "0.5378078", "0.53646255", "0.53271574", "0.5255284", "0.52101886", "0.51747125", "0.5091524", "0.50761956", "0.50503206", "0.50297374", "0.5002231", "0.49973628", "0.4993629", "0.49935707", "0.4985014", "0.4977023", "0.49650073", "0.4944422", "0.48940185", "0.48923475", "0.4862262", "0.48069558", "0.48037642", "0.47818887" ]
0.69020957
0
Rebase topic branch with work from base branch and push.
def rebase_topic_branch_and_push(self):
    # Rebase topic branch
    print('Checking out topic branch..')
    self.git.checkout(self.topic_branch)
    print('Updating topic branch with work from base branch...')
    self.git.rebase(self.base_branch)

    # Push rebased version (so it'll get marked as merged later if on
    # Github)
    print('Pushing updated topic branch...')
    self.git.push('--force')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)", "def merge_and_cleanup(self):\n print('Checking out base branch and merging topic branch...')\n self.git.checkout(self.base_branch)\n self.git.merge('--ff-only', self.topic_branch)\n\n # Push merge and delete topic branch\n print('Pushing base branch with topic branch merged...')\n self.git.push()\n print('Deleting remote topic branch...')\n self.git.push('origin', ':{}'.format(self.topic_branch))\n\n # Optionally delete local topic branch\n if self.delete_local:\n print('Deleting local topic branch...')\n self.git.branch('-D', self.topic_branch)", "def __init__(self, base_branch, topic_branch=None, delete_local=False):\n super(TopicMerge, self).__init__(base_branch)\n\n self.topic_branch = topic_branch\n self.delete_local = delete_local\n\n if not topic_branch:\n self.topic_branch = self.active_branch()\n print(\"Using active branch '{}' for topic branch.\".format(self.topic_branch))\n\n if self.topic_branch == self.base_branch:\n raise Exception(\"Topic branch and base branch shouldn't be the same.\")", "def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)", "def update_base_branch(self):\n # Make sure base branch is up to date\n print(\"Checking out base branch '{}'...\".format(self.base_branch))\n self.git.checkout(self.base_branch)\n print('Updating base branch...')\n self.git.pull('--rebase')", "def rebase(self):\n self.cm.rebase()", "def test_reset_to_remote_after_rebase(self) -> None:\n (\n self.repo_sandbox\n .new_branch(\"branch-0\")\n .commit()\n .push()\n .new_branch(\"branch-1\")\n .commit()\n .push()\n .check_out(\"branch-0\")\n .commit()\n )\n rewrite_branch_layout_file(\"branch-0\\n\\tbranch-1\")\n\n with fixed_author_and_committer_date_in_past():\n assert_success(\n [\"traverse\", \"-y\"],\n \"\"\"\n Pushing branch-0 to origin...\n\n Checking out branch-1\n\n branch-0\n |\n x-branch-1 *\n\n Rebasing branch-1 onto branch-0...\n\n Branch branch-1 diverged from (and has older commits than) its remote counterpart origin/branch-1.\n Resetting branch branch-1 to the commit pointed by origin/branch-1...\n\n branch-0\n |\n x-branch-1 *\n\n Reached branch branch-1 which has no successor; nothing left to update\n \"\"\"\n )", "def pull(ctx, path_base):\n with ctx.cd(path_base):\n ctx.run('git reset --hard')\n ctx.run('git pull origin master')", "def check_out_topic_branch_from_remote(self):\n self.git.checkout('-b', self.topic_branch, '{}/{}'.format(self.base_branch_remote(), self.topic_branch))", "def svn_rebase():\n output = str(git.svn.rebase()).strip()\n if not output.endswith('Current branch master is up to date.'):\n print('\"' + output + '\"')", "def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))", "def rebase(self):\n log.debug(\"Rebase IDK working repository\")\n idk_repo = os.getcwd()\n \n # We assume we are in the root of the local repository directory because\n # DEFAULT_CONFIG is a relative path from there\n log.debug(\"Check for GIT information in: \" + os.curdir);\n # TODO add git check\n\n ### This would be nice to ultimately pull from the repo object, but the version of gitpython\n ### installed doesn't support remotes. 
\n origin = idk_repo\n\n log.debug( \"Does '%s' contain '%s'\", origin, MI_REPO_NAME)\n # Added second criteria as a quick fix to get buildbot working. Need a better\n # way of identifing the idk dir\n if origin.find(MI_REPO_NAME) < 0 and origin.find('/build') < 0:\n # Maybe we wound up close, so try a quick change, then fail\n try:\n os.chdir(MI_REPO_NAME)\n origin = os.getcwd()\n except:\n raise IDKWrongRunningDirectory(msg=\"Please run this process from the root your local MI git repository\")\n \n if origin.find(MI_REPO_NAME) < 0 and origin.find('/build') < 0:\n raise IDKWrongRunningDirectory(msg=\"Please run this process from the root your local MI git repository\")\n \n self.set(YAML_CONFIG_WORKING_REPO, idk_repo)\n self.set(YAML_CONFIG_START_RABBIT, False)\n self.set(YAML_CONFIG_START_COUCH, False)", "async def trigger_build(self, *, branch=None, message=None):", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)", "def prepare_deploy(ticket=None, msg=None, branch=None):\n test()\n commit(ticket, msg)\n push(branch)\n pull(branch)", "def apply_and_push(localrepo, remote, changer, max_attempts=10,\n ssh_username=None, ssh_key=None, force=False):\n assert callable(changer)\n branch = get_branch(localrepo)\n changer(localrepo, 1)\n for n in range(1, max_attempts+1):\n new_revs = []\n try:\n new_revs = out(src=localrepo, remote=remote,\n ssh_username=ssh_username,\n ssh_key=ssh_key)\n if len(new_revs) < 1:\n raise HgUtilError(\"No revs to push\")\n push(src=localrepo, remote=remote, ssh_username=ssh_username,\n ssh_key=ssh_key, force=force)\n return\n except subprocess.CalledProcessError, e:\n log.debug(\"Hit error when trying to push: %s\" % str(e))\n if n == max_attempts:\n log.debug(\"Tried %d times, giving up\" % max_attempts)\n for r in reversed(new_revs):\n run_cmd(['hg', 'strip', '-n', r[REVISION]], cwd=localrepo)\n raise HgUtilError(\"Failed to push\")\n pull(remote, localrepo, update_dest=False,\n ssh_username=ssh_username, ssh_key=ssh_key)\n # After we successfully rebase or strip away heads the push is\n # is attempted again at the start of the loop\n try:\n run_cmd(['hg', 'rebase'], cwd=localrepo)\n except subprocess.CalledProcessError, e:\n log.debug(\"Failed to rebase: %s\" % str(e))\n update(localrepo, branch=branch)\n for r in reversed(new_revs):\n run_cmd(['hg', 'strip', '-n', r[REVISION]], cwd=localrepo)\n changer(localrepo, n+1)", "def _get_rebasebranch(self):\n logging.info('--- Get Rebasebranch ---')\n local_branch_candidates = {\n branch for branch in self.local_branches\n if branch == self.options.rebasebranch}\n remote_branch_candidates = {\n branch for branch in self.remote_branches\n if self.options.rebasebranch in branch}\n try:\n found_local_branch = local_branch_candidates.pop()\n except KeyError:\n gitwrapper.exit_with_error(\n 'No local branches named %r found.',\n self.options.rebasebranch)\n #\n if local_branch_candidates:\n gitwrapper.exit_with_error(\n 'Too many matching local branches found: %s, %s.',\n found_local_branch,\n ', '.join(local_branch_candidates))\n #\n if not remote_branch_candidates:\n gitwrapper.exit_with_error(\n 'No remote branches named %r found.',\n self.options.rebasebranch)\n #\n if len(remote_branch_candidates) > 2:\n # 1 if remote is not pushed, 2 if its pushed to remote\n gitwrapper.exit_with_error(\n 'Too many matching remote 
branches found: %s.',\n ', '.join(remote_branch_candidates))\n #\n self.local_branches = {found_local_branch}\n self.remote_branches = remote_branch_candidates\n logging.info('Found local branch %r.', found_local_branch)\n logging.info(\n 'Found remote branches %s.'\n ' and '.join(repr(branch) for branch in self.remote_branches))\n # We only rebase the specified branch\n self.tags = set()", "def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise", "def rebase(self, *arguments, **kwargs):\n return self.get_output('rebase', *arguments, **kwargs)", "def merge_with(self, topic):\n with transaction.atomic():\n if self == topic:\n return self\n if (\n self.branched_from\n and topic.branched_from\n and self.branched_from != topic.branched_from\n ):\n raise ValueError(\"Cannot merge topics with different branched_from topics.\")\n if self.most_recent.semester >= topic.most_recent.semester:\n Course.objects.filter(topic=topic).update(topic=self)\n if topic.branched_from and not self.branched_from:\n self.branched_from = topic.branched_from\n self.save()\n topic.delete()\n return self\n else:\n Course.objects.filter(topic=self).update(topic=topic)\n if self.branched_from and not topic.branched_from:\n topic.branched_from = self.branched_from\n topic.save()\n self.delete()\n return topic", "def sync(self, fork_remote, rebase=False):\n\n self._print(' - Sync fork with upstream remote')\n if self.ref_type(self.default_ref) != 'branch':\n message = colored(' - Can only sync branches', 'red')\n self._print(message)\n self._exit(message)\n fork_remote_output = fmt.remote_string(fork_remote)\n branch_output = fmt.ref_string(self.truncate_ref(self.default_ref))\n if rebase:\n self._rebase_remote_branch(self.remote, self.truncate_ref(self.default_ref))\n else:\n self._pull(self.remote, self.truncate_ref(self.default_ref))\n self._print(' - Push to ' + fork_remote_output + ' ' + branch_output)\n command = ['git', 'push', fork_remote, self.truncate_ref(self.default_ref)]\n return_code = execute_command(command, self.repo_path, print_output=self.print_output)\n if return_code != 0:\n message = colored(' - Failed to push to ', 'red') + fork_remote_output + ' ' + branch_output\n self._print(message)\n self._print(fmt.command_failed_error(command))\n self._exit(message)", "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def pull(explicit=False):\n repo = git.repo()\n check_detached_head()\n saved_current_branch = repo.current_branch()\n\n commit()\n remote = remote_branch() \n\n # fetch. Dont use pull because we anyway have to local branches two deal\n # with: free and nice\n repo.fetch()\n\n # merge (updated) remote branch into free branch\n free = free_branch() \n if free:\n repo.checkout(free)\n repo.merge(remote)\n\n # rebase nice branch onto (updated) remote branch\n # todo: what if the above pull fails? 
Then the nice_branch is not rebased which leads to troubles later\n # todo: should be done automatically within pull if nice-branch is setuped correctly\n nice = nice_branch() \n if nice:\n repo.checkout(nice)\n repo.rebase(remote)\n\n if explicit:\n repo.checkout(saved_current_branch)", "def merge(self, ref):\n active = self._git.active_branch\n active_commit = self._git.active_branch.commit\n active_name = active.name\n merge_base = self._git.merge_base(active, ref)\n ref_commit = self._git.commit(ref)\n self._git.index.merge_tree(ref_commit, base=merge_base)\n merge_commit = self._git.index.commit(\n f\"Merged {ref} into {active_name}\",\n parent_commits=(active_commit, ref_commit),\n )\n self.log.error(\"MERGE %s\", merge_commit)\n self._git.active_branch.reference = merge_commit\n active.checkout()\n self._git.head.reset(index=True, working_tree=True)", "def rebase(\n self,\n rebase_source: str,\n message: Optional[str] = None,\n author: Optional[str] = None,\n ) -> dict:\n self._check_connection()\n\n if author is None:\n author = self._author\n if message is None:\n message = f\"Rebase from {rebase_source} by Python client {__version__}\"\n rc_args = {\"rebase_from\": rebase_source, \"author\": author, \"message\": message}\n return self._dispatch_json(\"post\", self._rebase_url(), rc_args)", "def ensure_branch_preflight(self, commit, branch_id):\n log = LOG.getChild('ensure_branch_preflight')\n branch = self.ctx.branch_dict().get(branch_id)\n # branch should never be None here. p4gf_branch_id.Assigner() must\n # create Branch objects for each assignment.\n\n if self._current_branch \\\n and self._current_branch.branch_id == branch_id:\n log.debug(\"sha={} want branch_id={} curr branch_id={} NOP\"\n .format( commit['sha1'][:7]\n , branch_id[:7]\n , self._current_branch.branch_id[:7]))\n log.debug(\"staying on branch {}\"\n .format(self.ctx.branch_dict().get(branch_id)))\n\n return branch\n\n cbid = self._current_branch.branch_id if self._current_branch else 'None'\n log.debug(\"sha={} want branch_id={} curr branch_id={} switch\"\n .format(commit['sha1'][:7], branch_id[:7], cbid[:7]))\n\n if not branch.view_lines:\n self.finish_branch_definition(commit, branch)\n\n elif branch.view_p4map:\n # if this is a stream branch, check for mutation of the stream's\n # view by comparing with the original view saved in p4gf_config2\n if branch.original_view_lines:\n original_view_lines = '\\n'.join(branch.original_view_lines)\n view_lines = p4gf_path_convert.convert_view_to_no_client_name(branch.view_lines)\n if not view_lines == original_view_lines:\n raise PreflightException(\n _('Unable to push. 
Stream view changed from:\\n'\n '{old_view}\\nto:\\n{new_view}')\n .format(old_view=original_view_lines, new_view=view_lines))\n # Find existing depot branch for branch view's LHS.\n lhs = branch.view_p4map.lhs()\n branch.depot_branch = self.ctx.depot_branch_info_index() \\\n .find_depot_path(lhs[0])\n\n log.debug(\"switching to branch {}\".format(branch))\n\n # By now we should have a branch and a branch.view_lines.\n # First remove current branch's files from workspace\n # Client spec is set to normdir\n self._current_branch = branch\n return branch", "def _switchBranch(self, release):\n if release is None:\n self.branch = None\n self.branch_dir = None\n log.info('No release branch available')\n else:\n self.wc.update()\n assert self.wc.exists('branches/' + release)\n io.linesToFile(self.path(self.BRANCH_FILE), [release])\n self.branch = release\n self.branch_dir = 'branches/' + release\n self.wc.update(self.branch_dir, depth='infinity')\n log.info('Working on branch ' + self.branch)", "def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)", "def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())", "def base():\n wheels()\n build_base()\n push_base()" ]
[ "0.67438114", "0.6672996", "0.65347326", "0.6462883", "0.6459222", "0.618939", "0.6010252", "0.56230044", "0.5605947", "0.55762273", "0.5462592", "0.5313506", "0.5272005", "0.5262014", "0.5259698", "0.52522224", "0.52197486", "0.51008797", "0.5083094", "0.50617796", "0.5045152", "0.5028403", "0.50246406", "0.49939227", "0.49655142", "0.4948932", "0.494247", "0.4933606", "0.48812512", "0.48686966" ]
0.9047255
0
Merge topic branch then delete remotely and, optionally, locally.
def merge_and_cleanup(self):
    print('Checking out base branch and merging topic branch...')
    self.git.checkout(self.base_branch)
    self.git.merge('--ff-only', self.topic_branch)

    # Push merge and delete topic branch
    print('Pushing base branch with topic branch merged...')
    self.git.push()
    print('Deleting remote topic branch...')
    self.git.push('origin', ':{}'.format(self.topic_branch))

    # Optionally delete local topic branch
    if self.delete_local:
        print('Deleting local topic branch...')
        self.git.branch('-D', self.topic_branch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_out_topic_branch_from_remote(self):\n self.git.checkout('-b', self.topic_branch, '{}/{}'.format(self.base_branch_remote(), self.topic_branch))", "def __gitDeleteBranch(self):\n self.vcs.gitDeleteRemoteBranch(self.project.getProjectPath())", "def delete_remote():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))", "def prune_branch_remote(self, branch, remote):\n\n branch_output = fmt.ref_string(branch)\n if not self.existing_remote_branch(branch, remote):\n self._print(' - Remote branch ' + branch_output + \" doesn't exist\")\n return\n try:\n self._print(' - Delete remote branch ' + branch_output)\n self.repo.git.push(remote, '--delete', branch)\n except GitError as err:\n message = colored(' - Failed to delete remote branch ', 'red') + branch_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()", "def prune_branch_local(self, branch, force):\n\n branch_output = fmt.ref_string(branch)\n if branch not in self.repo.heads:\n self._print(' - Local branch ' + branch_output + \" doesn't exist\")\n return\n prune_branch = self.repo.heads[branch]\n if self.repo.head.ref == prune_branch:\n ref_output = fmt.ref_string(self.truncate_ref(self.default_ref))\n try:\n self._print(' - Checkout ref ' + ref_output)\n self.repo.git.checkout(self.truncate_ref(self.default_ref))\n except GitError as err:\n message = colored(' - Failed to checkout ref', 'red') + ref_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()\n try:\n self._print(' - Delete local branch ' + branch_output)\n self.repo.delete_head(branch, force=force)\n return\n except GitError as err:\n message = colored(' - Failed to delete local branch ', 'red') + branch_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()", "def __init__(self, base_branch, topic_branch=None, delete_local=False):\n super(TopicMerge, self).__init__(base_branch)\n\n self.topic_branch = topic_branch\n self.delete_local = delete_local\n\n if not topic_branch:\n self.topic_branch = self.active_branch()\n print(\"Using active branch '{}' for topic branch.\".format(self.topic_branch))\n\n if self.topic_branch == self.base_branch:\n raise Exception(\"Topic branch and base branch shouldn't be the same.\")", "def test_branch_deleted(local):\n pytest.run(local, ['git', 'checkout', 'feature'])\n pytest.run(local, ['git', 'push', 'origin', '--delete', 'feature'])\n local.join('README').write('Changed by local.')\n\n # Run.\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n assert local.join('README').read() == 'Changed by local.'", "def delete_branch_on_merge(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def _set_delete_branch_on_merge(change: Change[str]) -> Change[str]:\n print_debug(\"[%s] Enforcing branch deletion on PR merge\" % highlight(repo.name))\n try:\n repo.edit(delete_branch_on_merge=value)\n except GithubException:\n return change.failure()\n\n return change.success()\n\n if not repo.delete_branch_on_merge:\n change = Change(\n meta=ChangeMetadata(\n executor=_set_delete_branch_on_merge,\n ),\n action=ChangeActions.REPLACE,\n before=\"On PR merge: %s\" % \"Delete branch\" if 
repo.delete_branch_on_merge else \"Keep branch\",\n after=\"On PR merge: %s\" % \"Delete branch\" if value else \"Keep branch\"\n )\n return [change]\n return []", "def execute(self: \"DeleteBranchOperator\", context: Dict[str, Any]) -> Any:\n hook = NessieHook(conn_id=self.conn_id)\n\n hook.delete_reference(self.branch)", "def rebase_topic_branch_and_push(self):\n # Rebase topic branch\n print('Checking out topic branch..')\n self.git.checkout(self.topic_branch)\n print('Updating topic branch with work from base branch...')\n self.git.rebase(self.base_branch)\n\n # Push rebased version (so it'll get marked as merged later if on\n # Github)\n print('Pushing updated topic branch...')\n self.git.push('--force')", "def delete_branch(api_access_token: str, repo: str, ref: str) -> response.Response:\n api = github.Github(api_access_token)\n\n repository = api.get_repo(repo)\n repository_ref = repository.get_git_ref('heads/{}'.format(ref))\n repository_ref.delete()\n\n return response.success('Successfully deleted \"{}\" from repository \"{}\"'.format(ref, repo))", "def delete_branch(self):\n for p in self.get_branch():\n if p.kind == 'image':\n self.get(p.uid).delete_image()\n else: \n # delete related tags\n for t in self.Tag.list(page=p.uid):\n t.delete()\n # delete page \n p.delete()", "def update_branch(branch, repo, options):\n update = None\n\n remote = repo.get_merge_branch(branch)\n if not remote:\n gbp.log.warn(\"No branch tracking '%s' found - skipping.\" % branch)\n return False\n\n can_fast_forward, up_to_date = repo.is_fast_forward(branch, remote)\n\n if up_to_date: # Great, we're done\n gbp.log.info(\"Branch '%s' is already up to date.\" % branch)\n return True\n\n if can_fast_forward:\n update = 'merge'\n else:\n if options.force == 'merge':\n gbp.log.info(\"Non-fast forwarding '%s' due to --force=merge\" % branch)\n update = 'merge'\n elif options.force == 'clean':\n gbp.log.info(\"Checking out clean copy of '%s' due to --force=clean\" % branch)\n update = 'clean'\n else:\n gbp.log.warn(\"Skipping non-fast forward of '%s' - use --force or \"\n \"update manually\" % branch)\n\n if update:\n gbp.log.info(\"Updating '%s'\" % branch)\n if repo.branch == branch:\n if update == 'merge':\n repo.merge(remote)\n elif update == 'clean':\n # Have to drop our current branch\n tmpbranch = \"_gbptmp-\"+branch\n gbp.log.debug(\"Checking out '%s' to '%s'\" % (remote, tmpbranch))\n repo.create_branch(tmpbranch, remote)\n gbp.log.debug(\"Switching current branch to '%s'\" % (tmpbranch))\n repo.set_branch(tmpbranch)\n gbp.log.debug(\"Dropping branch '%s'\" % branch)\n repo.delete_branch(branch)\n gbp.log.info(\"Renaming branch '%s' to '%s'\" % (tmpbranch, branch))\n repo.rename_branch(tmpbranch, branch)\n else:\n if can_fast_forward or (update == 'clean'):\n sha1 = repo.rev_parse(remote)\n repo.update_ref(\"refs/heads/%s\" % branch, sha1,\n msg=\"gbp: forward %s to %s\" % (branch, remote))\n elif update == 'merge':\n # Merge other branch, if it cannot be fast-forwarded\n current_branch=repo.branch\n repo.set_branch(branch)\n repo.merge(remote)\n repo.set_branch(current_branch)\n\n return (update != None)", "def reset(self, depth=0):\n\n if self.ref_type(self.default_ref) == 'branch':\n branch = self.truncate_ref(self.default_ref)\n branch_output = fmt.ref_string(branch)\n if not self.existing_local_branch(branch):\n return_code = self._create_branch_local_tracking(branch, self.remote, depth=depth, fetch=True)\n if return_code != 0:\n message = colored(' - Failed to create tracking branch ', 'red') 
+ branch_output\n self._print(message)\n self._exit(message)\n return\n elif self._is_branch_checked_out(branch):\n self._print(' - Branch ' + branch_output + ' already checked out')\n else:\n self._checkout_branch_local(branch)\n remote_output = fmt.remote_string(self.remote)\n if not self.existing_remote_branch(branch, self.remote):\n message = colored(' - No existing remote branch ', 'red') + remote_output + ' ' + branch_output\n self._print(message)\n self._exit(message)\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._print(' - Reset branch ' + branch_output + ' to ' + remote_output + ' ' + branch_output)\n remote_branch = self.remote + '/' + branch\n self._reset_head(branch=remote_branch)\n elif self.ref_type(self.default_ref) == 'tag':\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._checkout_tag(self.truncate_ref(self.default_ref))\n elif self.ref_type(self.default_ref) == 'sha':\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._checkout_sha(self.default_ref)", "def abort_merge():\n common.safe_git_call('merge --abort')", "def _remove_master(self, author, dup_id, master_id):\n self.env.log.debug(\"master: undupping %s to %s\" % (dup_id, master_id))\n\n # tag the master ticket at each step\n cmt = u\"Ticket #%s has been removed as a \" \\\n \"duplicate of ticket #%s.\" % (dup_id, master_id)\n\n self._update_master(author, cmt, master_id)", "def branch_delete(request, branch_id):\n branch = models.Branch.get_by_id(int(branch_id))\n if branch.owner != request.user:\n return HttpTextResponse('You do not own this branch', status=403)\n\n repo_key = branch.repo_key\n branch.key.delete()\n num_branches = models.Branch.query(models.Branch.repo_key == repo_key).count()\n if not num_branches:\n # Even if we don't own the repository? Yes, I think so! 
Empty\n # repositories have no representation on screen.\n repo_key.delete()\n\n return HttpResponseRedirect(reverse(repos))", "def reset_branch_to_remote(repo, branch, hard=True):\n remote = repo.get_branch_remote(branch)\n kw = dict(remote=remote, branch=branch)\n if hard:\n kw['flags'] = '--hard'\n repo.issue('git reset {flags} {remote}/{branch}'.format(**kw))", "def delete_remote_tag(tag, remote='origin', directory=None):\n execute_command('git push {0} :{1}'.format(remote, tag), shell=True,\n cwd=directory)", "def remove_tmp_branches(self):\n if 'master-tmp' in self.repo.branches:\n print('Removing the temporary branches')\n if self.repo.curbranch in ('master-tmp', 'etc-tmp',\n 'timestamps-tmp'):\n self.repo.checkout('master')\n for branch in ('master', 'etc', 'timestamps'):\n tmp_branch = '%s-tmp' % branch\n if not self.dry_run:\n if branch in ('master', 'etc'):\n # If there is a merge to be done then tag the branch\n # before the merge.\n if (self.repo.git_cmd('rev-list %s..%s' %\n (branch, tmp_branch))):\n self.repo.git_cmd('tag -f %s-prev %s' %\n (branch, branch))\n self.repo.checkout(branch)\n self.repo.git_cmd('merge %s' % tmp_branch)\n self.repo.git_cmd('branch -D %s' % tmp_branch)", "def clean_master():", "def pull(ctx, path_base):\n with ctx.cd(path_base):\n ctx.run('git reset --hard')\n ctx.run('git pull origin master')", "def git_pull():\n\n puts(yellow(\"Pull master from GitHub\"))\n with cd(env.source_dir):\n run('git reset --hard HEAD')\n run('git pull')", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def test_reset_to_remote_after_rebase(self) -> None:\n (\n self.repo_sandbox\n .new_branch(\"branch-0\")\n .commit()\n .push()\n .new_branch(\"branch-1\")\n .commit()\n .push()\n .check_out(\"branch-0\")\n .commit()\n )\n rewrite_branch_layout_file(\"branch-0\\n\\tbranch-1\")\n\n with fixed_author_and_committer_date_in_past():\n assert_success(\n [\"traverse\", \"-y\"],\n \"\"\"\n Pushing branch-0 to origin...\n\n Checking out branch-1\n\n branch-0\n |\n x-branch-1 *\n\n Rebasing branch-1 onto branch-0...\n\n Branch branch-1 diverged from (and has older commits than) its remote counterpart origin/branch-1.\n Resetting branch branch-1 to the commit pointed by origin/branch-1...\n\n branch-0\n |\n x-branch-1 *\n\n Reached branch branch-1 which has no successor; nothing left to update\n \"\"\"\n )", "def pull(explicit=False):\n repo = git.repo()\n check_detached_head()\n saved_current_branch = repo.current_branch()\n\n commit()\n remote = remote_branch() \n\n # fetch. Dont use pull because we anyway have to local branches two deal\n # with: free and nice\n repo.fetch()\n\n # merge (updated) remote branch into free branch\n free = free_branch() \n if free:\n repo.checkout(free)\n repo.merge(remote)\n\n # rebase nice branch onto (updated) remote branch\n # todo: what if the above pull fails? 
Then the nice_branch is not rebased which leads to troubles later\n # todo: should be done automatically within pull if nice-branch is setuped correctly\n nice = nice_branch() \n if nice:\n repo.checkout(nice)\n repo.rebase(remote)\n\n if explicit:\n repo.checkout(saved_current_branch)", "def delete_project_branch(self, project, branch):\n params = {\n 'project': project,\n 'branch': branch\n }\n self.sonarqube.make_call('post', API_PROJECT_BRANCHES_DELETE_ENDPOINT, **params)", "def _delete(self, ref: str) -> None:\n self._trace(\"deleting ref %s\" % ref)\n head = self.read_symbolic_ref(\"HEAD\")\n if head and ref == head[1]:\n _write(\"error %s refusing to delete the current branch: %s\" % (ref, head))\n return\n try:\n self._connection.files_delete(self._ref_path(ref))\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.DeleteError):\n raise\n # someone else might have deleted it first, that's fine\n self._refs.pop(ref, None) # discard\n self._pushed.pop(ref, None) # discard\n _write(\"ok %s\" % ref)", "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "def deploy_pull_master(self, restart=True):\n self.ops.local(\"cd \"+self.local_path+\"/src && git reset --hard HEAD && git pull origin master && git submodule update\")\n PiService.deploy(self, restart)" ]
[ "0.6874943", "0.6639755", "0.6599267", "0.62721497", "0.61086285", "0.60286224", "0.58906126", "0.58730567", "0.5793986", "0.5737783", "0.5683312", "0.56208444", "0.5504748", "0.54956734", "0.5470647", "0.53895134", "0.5362018", "0.52970517", "0.5293758", "0.528265", "0.5230892", "0.52236646", "0.51508415", "0.51361793", "0.51088244", "0.5107939", "0.50835973", "0.5024411", "0.5021137", "0.5019471" ]
0.78640497
0
Check out local version of topic branch.
def check_out_topic_branch_from_remote(self):
    self.git.checkout('-b', self.topic_branch, '{}/{}'.format(self.base_branch_remote(), self.topic_branch))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)", "def create_topic_branch(self, topic_branch_name):\n print(\"Creating topic branch locally...\")\n self.git.checkout(self.base_branch)\n self.git.checkout('-b', topic_branch_name)\n print(\"Pushing topic branch to base branch's remote...\")\n self.git.push('-u', self.base_branch_remote(), topic_branch_name)", "def checkout_nightly_version(branch, spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"checkout\", \"-b\", branch, nightly_version]\n p = subprocess.run(cmd, check=True)", "def stable():\n env.branch = 'stable'", "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def rebase_topic_branch_and_push(self):\n # Rebase topic branch\n print('Checking out topic branch..')\n self.git.checkout(self.topic_branch)\n print('Updating topic branch with work from base branch...')\n self.git.rebase(self.base_branch)\n\n # Push rebased version (so it'll get marked as merged later if on\n # Github)\n print('Pushing updated topic branch...')\n self.git.push('--force')", "def master():\n env.branch = 'master'", "def master():\n env.branch = 'master'", "def git_branch(self, app, branch):\n if app == self.PROJECT_NAME:\n app_path = self.PROJECT_DIR\n else:\n raise ValueError('Unknown app')\n\n with lcd(app_path):\n self.local('git pull && git checkout %s' % branch)\n\n self.display('%s has been successfully switched to tag/branch %s.' 
% (app, branch), color='green')", "def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def checkout_qmk():\n if exists('qmk_firmware'):\n rmtree('qmk_firmware')\n\n if not fetch_source(repo_name(QMK_GIT_URL)):\n git_clone(QMK_GIT_URL, QMK_GIT_BRANCH)", "def on_remoteBranchButton_toggled(self, checked):\n self.__generateDefaultCommitMessage()\n self.__updateOK()", "def gitCheckoutBranch(self, path, branch):\r\n\r\n with workInDirectory(path):\r\n fetch_cmd = [\"git\", \"fetch\"]\r\n if self.verbose:\r\n print(\"Runing Command : {}\".format(\" \".join(fetch_cmd)))\r\n\r\n SubProcessUtility.runCommand(fetch_cmd)\r\n\r\n checkout_branch_command = [\"git\", \"checkout\", branch]\r\n if self.verbose:\r\n print(\"Running Command : {}\".format(\" \".join(checkout_branch_command)))\r\n SubProcessUtility.runCommand(checkout_branch_command)", "def test_branch_deleted(local):\n pytest.run(local, ['git', 'checkout', 'feature'])\n pytest.run(local, ['git', 'push', 'origin', '--delete', 'feature'])\n local.join('README').write('Changed by local.')\n\n # Run.\n actual = commit_and_push(str(local), 'origin', Versions(REMOTES))\n assert actual is True\n pytest.run(local, ['git', 'diff-index', '--quiet', 'HEAD', '--']) # Exit 0 if nothing changed.\n assert local.join('README').read() == 'Changed by local.'", "def main(branch):\n try:\n # Ensure that we're in a git repository. This command is silent unless\n # you're not actually in a git repository, in which case, you receive a\n # \"Not a git repository\" error message.\n output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')\n sys.stdout.write(output)\n except subprocess.CalledProcessError:\n # Bail if we're not in a git repository.\n return\n\n # This behavior ensures a better user experience for those that aren't\n # intimately familiar with git.\n ensure_remote_branch_is_tracked(branch)\n\n # Switch to the specified branch and update it.\n subprocess.check_call(['git', 'checkout', '--quiet', branch])\n\n # Pulling is always safe here, because we never commit to this branch.\n subprocess.check_call(['git', 'pull', '--quiet'])\n\n # Checkout the top commit in the branch, effectively going \"untracked.\"\n subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])\n\n # Clean up the repository of Python cruft. Because we've just switched\n # branches and compiled Python files should not be version controlled,\n # there are likely leftover compiled Python files sitting on disk which may\n # confuse some tools, such as sqlalchemy-migrate.\n subprocess.check_call(['find', '.', '-name', '\"*.pyc\"', '-delete'])\n\n # For the sake of user experience, give some familiar output.\n print('Your branch is up to date with branch \\'origin/%s\\'.' 
% branch)", "def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)", "def checkout(revision):\n subprocess.run(\n ['git', 'checkout', revision],\n check=True\n )", "def prune_branch_local(self, branch, force):\n\n branch_output = fmt.ref_string(branch)\n if branch not in self.repo.heads:\n self._print(' - Local branch ' + branch_output + \" doesn't exist\")\n return\n prune_branch = self.repo.heads[branch]\n if self.repo.head.ref == prune_branch:\n ref_output = fmt.ref_string(self.truncate_ref(self.default_ref))\n try:\n self._print(' - Checkout ref ' + ref_output)\n self.repo.git.checkout(self.truncate_ref(self.default_ref))\n except GitError as err:\n message = colored(' - Failed to checkout ref', 'red') + ref_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()\n try:\n self._print(' - Delete local branch ' + branch_output)\n self.repo.delete_head(branch, force=force)\n return\n except GitError as err:\n message = colored(' - Failed to delete local branch ', 'red') + branch_output\n self._print(message)\n self._print(fmt.error(err))\n self._exit(message)\n except (KeyboardInterrupt, SystemExit):\n self._exit()", "def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])", "def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)", "def branch(branch_name):\n env.branch = branch_name", "def branch(branch_name):\n env.branch = branch_name", "def checkout_v8():\n if not OFFLINE_MODE:\n exec_cmd('git fetch --tags',\n cwd=V8_HOME,\n msg='Fetch the release tag information')\n\n exec_cmd('git checkout', V8_GIT_TAG,\n cwd=V8_HOME,\n msg='Checkout Google V8 v' + V8_GIT_TAG)", "def switchToBranch(self):\n branches = self._listBranches()\n if not branches:\n raise error.ExpectationFailed(\n 'No branches available. 
Please import one.')\n\n choice = io.getChoice('Available release branches:',\n 'Your choice?',\n branches,\n suggest=len(branches)-1)\n self._switchBranch(branches[choice])", "def test_master(self, tmpgitdir, branch):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first file')\n\n subprocess.check_call(['git', 'checkout', '-b', branch])\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'first'])\n\n assert git_head_ref_name(tmpgitdir) == branch", "def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }", "async def version_command(self, ctx):\n member = ctx.message.server.get_member(self.bot.user.id)\n current_commit = get_current_commit()\n commit_url = member.game.url + '/commit/' + current_commit\n msg = await self.bot.send_message(ctx.message.channel, 'I am currently running on commit `{}`\\n\\n{}'.format(current_commit, commit_url))", "def test_branching(self):\r\n repo_dir = self.GIT_REPO_DIR\r\n # Test successful import from command\r\n if not os.path.isdir(repo_dir):\r\n os.mkdir(repo_dir)\r\n self.addCleanup(shutil.rmtree, repo_dir)\r\n\r\n # Checkout non existent branch\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):\r\n git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')\r\n\r\n # Checkout new branch\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 
'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n def_ms = modulestore()\r\n # Validate that it is different than master\r\n self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n\r\n # Attempt to check out the same branch again to validate branch choosing\r\n # works\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n self.TEST_BRANCH)\r\n\r\n # Delete to test branching back to master\r\n delete_course(def_ms, contentstore(),\r\n self.TEST_BRANCH_COURSE,\r\n True)\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n git_import.add_repo(self.TEST_REPO,\r\n repo_dir / 'edx4edx_lite',\r\n 'master')\r\n self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))\r\n self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))" ]
[ "0.6593687", "0.6444415", "0.6442575", "0.6345166", "0.6301744", "0.6256093", "0.62468576", "0.616578", "0.616578", "0.61320764", "0.6116163", "0.6076133", "0.6060944", "0.59473294", "0.58994496", "0.5847794", "0.57866263", "0.577557", "0.57550055", "0.57492787", "0.5731519", "0.57304114", "0.5677794", "0.5677794", "0.56757003", "0.5647268", "0.56379235", "0.5636156", "0.5625353", "0.56210154" ]
0.7610128
0
Check whether a branch exists remotely using base branch's origin.
def remote_branch_exists(self, branch):
    try:
        self.git.show_ref("refs/remotes/{}/{}".format(self.base_branch_remote(), branch))
        return True
    except git.exc.GitCommandError:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def branch_exists(repo, branch, remote=False):\n ref = 'refs/remotes/origin/' + branch if remote else 'refs/heads/' + branch\n return subprocess.call(['git', 'show-ref', '-q', '--verify', ref],\n cwd=repo) == 0", "def branch_exists(branch_name, local_only=False, directory=None):\n for branch in get_branches(local_only, directory):\n if branch.startswith('remotes/'):\n branch = branch.split('/')\n if len(branch) > 2:\n branch = '/'.join(branch[2:])\n if branch_name == branch:\n return True\n else:\n if branch_name == branch:\n return True\n return False", "def branch_exists(branch):\n\n try:\n git('show-ref', branch)\n return True\n except subprocess.CalledProcessError:\n return False", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def local_branch_exists(self, branch):\n return branch in self.repo.branches", "def git_repo_branch_exists(repo: str, branch: str) -> bool:\n get_git_version()\n cmd = f\"git ls-remote {repo} {branch}\"\n # We might be tempted to use `--exit-code` with `git ls-remote`, but\n # `run_command` handles the `returncode` for us, so we'll rely on\n # the fact that stdout returns '' if the requested branch doesn't exist\n ret = run_command(cmd, capture=True)\n exists = ret.stdout != \"\"\n return exists", "def is_remote_reserve_branch_present(repo):\n reserve_name = phlgitu_ref.Name(_RESERVE_BRANCH_FQ_NAME)\n remote_ref_names = repo(\"ls-remote\").split()[1::2]\n return reserve_name.fq in remote_ref_names", "def ensure_remote_branch_is_tracked(branch):\n if branch == MASTER_BRANCH:\n # We don't need to explicitly track the master branch, so we're done.\n return\n\n # Ensure the specified branch is in the local branch list.\n output = subprocess.check_output(['git', 'branch', '--list'])\n for line in output.split('\\n'):\n if line.strip() == branch:\n # We are already tracking the remote branch\n break\n else:\n # We are not tracking the remote branch, so track it.\n try:\n sys.stdout.write(subprocess.check_output(\n ['git', 'checkout', '--track', 'origin/%s' % branch]))\n except subprocess.CalledProcessError:\n # Bail gracefully.\n raise SystemExit(1)", "def has_branch(self, branch):\n if self.branch == branch:\n return True\n return False", "def _is_branch(self, reference_name):\n return reference_name.startswith(\"refs/heads/\") or reference_name.startswith(\n \"refs/remotes/\"\n )", "def remote_exists(location, remote):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git config --local --get remote.{}.url'.format(remote)\n return subprocess.call(cmd, shell=True) == 0", "def branch_exists(nametag, branches):\n for branch in branches:\n if branches[branch].name == nametag:\n return True\n return False", "def check_branch(subcommand, branch):\n if subcommand != \"checkout\":\n return\n # first make sure actual branch name was given\n if branch is None:\n return \"Branch name to checkout must be supplied with '-b' option\"\n # next check that the local repo is clean\n cmd = [\"git\", \"status\", \"--untracked-files=no\", \"--porcelain\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, universal_newlines=True)\n if p.stdout.strip():\n return \"Need to have clean working tree to checkout!\\n\\n\" + p.stdout\n # next check that the branch name doesn't already exist\n cmd = [\"git\", \"show-ref\", \"--verify\", \"--quiet\", \"refs/heads/\" + branch]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if not 
p.returncode:\n return f\"Branch {branch!r} already exists\"", "def base_branch_remote(self):\n return self.git.config('--get', 'branch.{}.remote'.format(self.base_branch))", "def switch_branch(branch, rdir):\r\n # Get the latest remote\r\n try:\r\n cmd_log(['git', 'fetch', ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to fetch remote: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n\r\n # Check if the branch is available from the remote.\r\n cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of remote branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n if not branch in output:\r\n raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING)\r\n # Check it the remote branch has already been made locally\r\n cmd = ['git', 'branch', '-a', ]\r\n try:\r\n output = cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Getting a list of local branches failed: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n branches = []\r\n for line in output.split('\\n'):\r\n branches.append(line.replace('*', '').strip())\r\n\r\n if branch not in branches:\r\n # Checkout with -b since it is remote only\r\n cmd = ['git', 'checkout', '--force', '--track',\r\n '-b', branch, 'origin/{0}'.format(branch), ]\r\n try:\r\n cmd_log(cmd, rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to checkout remote branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)\r\n # Go ahead and reset hard to the newest version of the branch now that we know\r\n # it is local.\r\n try:\r\n cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to reset to branch: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_BRANCH)", "def is_branch(wit_path, branch):\n\n branches = _get_references_data(wit_path)\n del branches['HEAD']\n return branch in branches.keys()", "def _get_rebasebranch(self):\n logging.info('--- Get Rebasebranch ---')\n local_branch_candidates = {\n branch for branch in self.local_branches\n if branch == self.options.rebasebranch}\n remote_branch_candidates = {\n branch for branch in self.remote_branches\n if self.options.rebasebranch in branch}\n try:\n found_local_branch = local_branch_candidates.pop()\n except KeyError:\n gitwrapper.exit_with_error(\n 'No local branches named %r found.',\n self.options.rebasebranch)\n #\n if local_branch_candidates:\n gitwrapper.exit_with_error(\n 'Too many matching local branches found: %s, %s.',\n found_local_branch,\n ', '.join(local_branch_candidates))\n #\n if not remote_branch_candidates:\n gitwrapper.exit_with_error(\n 'No remote branches named %r found.',\n self.options.rebasebranch)\n #\n if len(remote_branch_candidates) > 2:\n # 1 if remote is not pushed, 2 if its pushed to remote\n gitwrapper.exit_with_error(\n 'Too many matching remote branches found: %s.',\n ', '.join(remote_branch_candidates))\n #\n self.local_branches = {found_local_branch}\n self.remote_branches = remote_branch_candidates\n logging.info('Found local branch %r.', found_local_branch)\n logging.info(\n 'Found remote branches %s.'\n ' and '.join(repr(branch) for branch in self.remote_branches))\n # We only rebase the specified branch\n self.tags = 
set()", "def branch(self, current_path):\n p = subprocess.Popen(\n [\"git\", \"show-ref\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n results = []\n try:\n current_branch = self.get_current_branch(current_path)\n for line in output.decode(\"utf-8\").splitlines():\n # The format for git show-ref is '<SHA-1 ID> <space> <reference name>'\n # For this method we are only interested in reference name.\n # Reference : https://git-scm.com/docs/git-show-ref#_output\n commit_sha = line.strip().split()[0].strip()\n reference_name = line.strip().split()[1].strip()\n if self._is_branch(reference_name):\n branch_name = self._get_branch_name(reference_name)\n is_current_branch = self._is_current_branch(\n branch_name, current_branch\n )\n is_remote_branch = self._is_remote_branch(reference_name)\n upstream_branch_name = None\n if not is_remote_branch:\n upstream_branch_name = self.get_upstream_branch(\n current_path, branch_name\n )\n tag = self._get_tag(current_path, commit_sha)\n results.append(\n {\n \"is_current_branch\": is_current_branch,\n \"is_remote_branch\": is_remote_branch,\n \"name\": branch_name,\n \"upstream\": upstream_branch_name,\n \"top_commit\": commit_sha,\n \"tag\": tag,\n }\n )\n\n # Remote branch is seleted use 'git branch -a' as fallback machanism\n # to get add detached head on remote branch to preserve older functionality\n # TODO : Revisit this to checkout new local branch with same name as remote\n # when the remote branch is seleted, VS Code git does the same thing.\n if current_branch == \"HEAD\":\n results.append(\n {\n \"is_current_branch\": True,\n \"is_remote_branch\": False,\n \"name\": self._get_detached_head_name(current_path),\n \"upstream\": None,\n \"top_commit\": None,\n \"tag\": None,\n }\n )\n return {\"code\": p.returncode, \"branches\": results}\n except Exception as downstream_error:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": str(downstream_error),\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git show-ref\",\n \"message\": error.decode(\"utf-8\"),\n }", "def start(self, remote, branch, depth, tracking):\n\n if branch not in self.repo.heads:\n if not is_offline():\n return_code = self.fetch(remote, ref=branch, depth=depth)\n if return_code != 0:\n sys.exit(1)\n return_code = self._create_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n return_code = self._checkout_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n else:\n branch_output = fmt.ref_string(branch)\n print(' - ' + branch_output + ' already exists')\n correct_branch = self._is_branch_checked_out(branch)\n if correct_branch:\n print(' - On correct branch')\n else:\n return_code = self._checkout_branch_local(branch)\n if return_code != 0:\n self._exit('', return_code=return_code)\n if tracking and not is_offline():\n self._create_branch_remote_tracking(branch, remote, depth)", "def master_branch(branch_name):\n\n if branch_name in MASTER_BRANCHES:\n return True\n\n return False", "def pull(repo: str, branch='master') -> bool:\n if not repo:\n raise NotADirectoryError\n repo=repo.replace('\\\\','/')\n # first checkout HEAD\n cmd = ['git', 'checkout', 'master', '--quiet', '--force']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n print(\"error during git checkout master\", result)\n return False\n\n cmd = ['git', 'pull', 'origin', branch, 
'--quiet']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n print(\"error durign pull\", result)\n return False\n return result.returncode == 0", "def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):\n # Check for each named branch if we're creating new remote heads.\n # To be a remote head after push, node must be either:\n # - unknown locally\n # - a local outgoing head descended from update\n # - a remote head that's known locally and not\n # ancestral to an outgoing head\n if remoteheads == [nullid]:\n # remote is empty, nothing to check.\n return\n\n if remote.capable('branchmap'):\n headssum = _headssummary(repo, remote, outgoing)\n else:\n headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)\n newbranches = [branch for branch, heads in headssum.iteritems()\n if heads[0] is None]\n # 1. Check for new branches on the remote.\n if newbranches and not newbranch: # new branch requires --new-branch\n branchnames = ', '.join(sorted(newbranches))\n raise util.Abort(_(\"push creates new remote branches: %s!\")\n % branchnames,\n hint=_(\"use 'hg push --new-branch' to create\"\n \" new remote branches\"))\n\n # 2 compute newly pushed bookmarks. We\n # we don't warned about bookmarked heads.\n localbookmarks = repo._bookmarks\n remotebookmarks = remote.listkeys('bookmarks')\n bookmarkedheads = set()\n for bm in localbookmarks:\n rnode = remotebookmarks.get(bm)\n if rnode and rnode in repo:\n lctx, rctx = repo[bm], repo[rnode]\n if bookmarks.validdest(repo, rctx, lctx):\n bookmarkedheads.add(lctx.node())\n\n # 3. Check for new heads.\n # If there are more heads after the push than before, a suitable\n # error message, depending on unsynced status, is displayed.\n error = None\n unsynced = False\n allmissing = set(outgoing.missing)\n allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))\n allfuturecommon.update(allmissing)\n for branch, heads in sorted(headssum.iteritems()):\n if heads[0] is None:\n # Maybe we should abort if we push more that one head\n # for new branches ?\n continue\n candidate_newhs = set(heads[1])\n # add unsynced data\n oldhs = set(heads[0])\n oldhs.update(heads[2])\n candidate_newhs.update(heads[2])\n dhs = None\n discardedheads = set()\n if repo.obsstore:\n # remove future heads which are actually obsolete by another\n # pushed element:\n #\n # XXX as above, There are several cases this case does not handle\n # XXX properly\n #\n # (1) if <nh> is public, it won't be affected by obsolete marker\n # and a new is created\n #\n # (2) if the new heads have ancestors which are not obsolete and\n # not ancestors of any other heads we will have a new head too.\n #\n # This two case will be easy to handle for know changeset but much\n # more tricky for unsynced changes.\n newhs = set()\n for nh in candidate_newhs:\n if nh in repo and repo[nh].phase() <= phases.public:\n newhs.add(nh)\n else:\n for suc in obsolete.allsuccessors(repo.obsstore, [nh]):\n if suc != nh and suc in allfuturecommon:\n discardedheads.add(nh)\n break\n else:\n newhs.add(nh)\n else:\n newhs = candidate_newhs\n if [h for h in heads[2] if h not in discardedheads]:\n unsynced = True\n if len(newhs) > len(oldhs):\n # strip updates to existing remote heads from the new heads list\n dhs = sorted(newhs - bookmarkedheads - oldhs)\n if dhs:\n if error is None:\n if branch not in ('default', None):\n error = _(\"push creates new remote head %s \"\n \"on branch '%s'!\") % (short(dhs[0]), branch)\n else:\n error = _(\"push creates new 
remote head %s!\"\n ) % short(dhs[0])\n if heads[2]: # unsynced\n hint = _(\"you should pull and merge or \"\n \"use push -f to force\")\n else:\n hint = _(\"did you forget to merge? \"\n \"use push -f to force\")\n if branch is not None:\n repo.ui.note(_(\"new remote heads on branch '%s'\\n\") % branch)\n for h in dhs:\n repo.ui.note(_(\"new remote head %s\\n\") % short(h))\n if error:\n raise util.Abort(error, hint=hint)\n\n # 6. Check for unsynced changes on involved branches.\n if unsynced:\n repo.ui.warn(_(\"note: unsynced remote changes!\\n\"))", "def git_remote(uri):\n remotes = git(['remote', '-v']).split('\\n')\n pattern = re.compile(r'(?iu)^(?P<name>[^ ]+)[\\t]+bzr::(?P<remote>[^ ]+)')\n for remote in remotes:\n log.debug('check remote: %s', remote)\n matches = pattern.match(remote)\n if matches and matches.group('remote') == uri:\n return matches.groups()\n return None, None", "def _get_remote_diff(repo, current_commit, remote, remote_branch_name):\n remote_ref = '/'.join((remote, remote_branch_name))\n if remote_ref in repo.get_remote_branches():\n lgr.debug(\"Testing for changes with respect to '%s' of remote '%s'\",\n remote_branch_name, remote)\n if current_commit is None:\n current_commit = repo.get_hexsha()\n remote_ref = repo.get_hexsha(remote_ref)\n diff = current_commit != remote_ref\n else:\n lgr.debug(\"Remote '%s' has no branch matching %r. Will publish\",\n remote, remote_branch_name)\n # we don't have any remote state, need to push for sure\n diff = True\n\n return diff", "def _is_current_branch(self, branch_name, current_branch_name):\n return branch_name == current_branch_name", "def local_diff_branch():\n # Only allow specified remote and branch in local dev.\n remote = os.getenv(LOCAL_REMOTE_ENV)\n branch = os.getenv(LOCAL_BRANCH_ENV)\n if remote is not None and branch is not None:\n return '%s/%s' % (remote, branch)", "def exists_remote(host, path):\n command = \"test -e \" + pipes.quote(path) + \" && echo 0 || echo 1\"\n (stdoutstring, stderrstring) = execute_ssh_command(host, port, USER, PASSWORD, None, None, command)\n\n for status in stdoutstring:\n if re.search('0', status):\n return True\n if re.search('1', status):\n return False", "def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. 
checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])", "def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)", "def push(self, remote: str, branch: str,\n *, username: str = '', password: str = '') -> bool:\n self.__verify_repo_initialized()\n try:\n address = heads.get_remote_address(self._env.branchenv, name=remote)\n cHEAD = heads.get_branch_head_commit(self._env.branchenv, branch)\n except (KeyError, ValueError) as e:\n raise e from None\n\n CR = ContentReader(self._env)\n self._client = HangarClient(envs=self._env,\n address=address,\n auth_username=username,\n auth_password=password)\n\n # ----------------- setup / validate operations -------------------\n\n with closing(self._client) as client:\n client: HangarClient # type hinting for development\n CR: ContentReader\n c_bhistory = summarize.list_history(refenv=self._env.refenv,\n branchenv=self._env.branchenv,\n branch_name=branch)\n try:\n s_branch = client.fetch_branch_record(branch)\n except grpc.RpcError as rpc_error:\n # Do not raise if error due to branch not existing on server\n if rpc_error.code() != grpc.StatusCode.NOT_FOUND:\n raise rpc_error\n else:\n sHEAD = s_branch.rec.commit\n if sHEAD == cHEAD:\n warnings.warn(\n f'NoOp: server HEAD: {sHEAD} == client HEAD: {cHEAD}', UserWarning)\n return branch\n elif (sHEAD not in c_bhistory['order']) and (sHEAD != ''):\n warnings.warn(\n f'REJECTED: server branch has commits not on client', UserWarning)\n return branch\n\n # --------------- negotiate missing data to send -------------------\n\n try:\n # First push op verifies user permissions if push restricted (NOT SECURE)\n res = client.push_find_missing_commits(branch)\n m_commits = res.commits\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.PERMISSION_DENIED:\n raise PermissionError(f'{rpc_error.code()}: {rpc_error.details()}')\n else:\n raise rpc_error\n\n m_labels, m_schemas = set(), set()\n m_schema_hashs = defaultdict(set)\n with tempfile.TemporaryDirectory() as tempD:\n tmpDF = os.path.join(tempD, 'test.lmdb')\n tmpDB = lmdb.open(path=tmpDF, **c.LMDB_SETTINGS)\n for commit in tqdm(m_commits, desc='counting objects'):\n # share unpacked ref db between dependent methods\n with tmpDB.begin(write=True) as txn:\n with txn.cursor() as curs:\n notEmpty = curs.first()\n while notEmpty:\n notEmpty = curs.delete()\n commiting.unpack_commit_ref(self._env.refenv, tmpDB, commit)\n # schemas\n schema_res = client.push_find_missing_schemas(commit, tmpDB=tmpDB)\n m_schemas.update(schema_res.schema_digests)\n # data hashs\n m_cmt_schema_hashs = defaultdict(list)\n mis_hashes_sch = client.push_find_missing_hash_records(commit, tmpDB=tmpDB)\n for hsh, schema in mis_hashes_sch:\n m_cmt_schema_hashs[schema].append(hsh)\n for schema, hashes in m_cmt_schema_hashs.items():\n m_schema_hashs[schema].update(hashes)\n # labels / metadata\n missing_labels = client.push_find_missing_labels(commit, tmpDB=tmpDB)\n m_labels.update(missing_labels)\n tmpDB.close()\n\n # ------------------------- send data -----------------------------\n\n # schemas\n for m_schema in tqdm(m_schemas, desc='pushing schemas'):\n schemaVal = CR.schema(m_schema)\n if not schemaVal:\n raise KeyError(f'no schema with hash: 
{m_schema} exists')\n client.push_schema(m_schema, schemaVal)\n # data\n total_data = sum([len(v) for v in m_schema_hashs.values()])\n with tqdm(total=total_data, desc='pushing data') as p:\n for dataSchema, dataHashes in m_schema_hashs.items():\n client.push_data(dataSchema, dataHashes, pbar=p)\n p.update(1)\n # labels/metadata\n for label in tqdm(m_labels, desc='pushing metadata'):\n labelVal = CR.label(label)\n if not labelVal:\n raise KeyError(f'no label with hash: {label} exists')\n client.push_label(label, labelVal)\n # commit refs\n for commit in tqdm(m_commits, desc='pushing commit refs'):\n cmtContent = CR.commit(commit)\n if not cmtContent:\n raise KeyError(f'no commit with hash: {commit} exists')\n client.push_commit_record(commit=cmtContent.commit,\n parentVal=cmtContent.cmtParentVal,\n specVal=cmtContent.cmtSpecVal,\n refVal=cmtContent.cmtRefVal)\n\n # --------------------------- At completion -----------------------\n\n # update local remote HEAD pointer\n branchHead = heads.get_branch_head_commit(self._env.branchenv, branch)\n try:\n client.push_branch_record(branch, branchHead)\n except grpc.RpcError as rpc_error:\n # Do not raise if error due to branch not existing on server\n if rpc_error.code() != grpc.StatusCode.ALREADY_EXISTS:\n logger.warning(f'CODE: {rpc_error.code()} DETAILS:{rpc_error.details()}')\n else:\n raise rpc_error\n else:\n cRemoteBranch = f'{remote}/{branch}'\n if cRemoteBranch not in heads.get_branch_names(self._env.branchenv):\n heads.create_branch(branchenv=self._env.branchenv,\n name=cRemoteBranch,\n base_commit=branchHead)\n else:\n heads.set_branch_head_commit(branchenv=self._env.branchenv,\n branch_name=cRemoteBranch,\n commit_hash=branchHead)\n return branch" ]
[ "0.7808668", "0.73967373", "0.73148525", "0.73089", "0.72829074", "0.72582304", "0.6801348", "0.65513825", "0.64167774", "0.6403177", "0.63310564", "0.6277862", "0.61876816", "0.61740935", "0.6012397", "0.59845227", "0.5948855", "0.59156495", "0.58973765", "0.58918685", "0.5798857", "0.57981795", "0.57872367", "0.57804763", "0.57736415", "0.5769363", "0.57556915", "0.57298535", "0.5728891", "0.57247037" ]
0.81284404
0
Return Git log output for unmerged commits.
def unmerged_log(self):
    return self.git.log('{}..{}'.format(self.base_branch, self.topic_branch))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report_unmerged(unmerged):\n _report_files('unmerged', unmerged)", "def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def ignore_merged_commits(self):\n return rh.shell.boolean_shell_value(\n self.config.get(self.OPTIONS_SECTION,\n self.OPTION_IGNORE_MERGED_COMMITS, None),\n False)", "def construct_merged_log_message(url, revnums):\n messages = ['']\n longest_sep = ''\n for r in revnums.sorted():\n message = get_commit_log(url, r)\n if message:\n message = re.sub(r'(\\r\\n|\\r|\\n)', \"\\n\", message)\n message = rstrip(message, \"\\n\") + \"\\n\"\n messages.append(prefix_lines(LOG_LINE_PREFIX, message))\n for match in LOG_SEPARATOR_RE.findall(message):\n sep = match[1]\n if len(sep) > len(longest_sep):\n longest_sep = sep\n\n longest_sep += LOG_SEPARATOR + \"\\n\"\n messages.append('')\n return longest_sep.join(messages)", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def retrieve_git_log(self):\n result = [str(entry).split(\"\\t\")[1]\n for entry in self.repo.head.log()]\n\n return result", "def get_git_diff_stdout() -> str:\n proc = subprocess.run(\n [\"git\", \"diff\", \"origin/main\", \"HEAD\"],\n capture_output=True,\n check=True,\n text=True,\n )\n return proc.stdout", "def __gitNotMergedBranchList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), False,\n listAll=False, merged=False)", "def log(self, current_path):\n p = Popen(\n [\"git\", \"log\", \"--pretty=format:%H%n%an%n%ar%n%s\", \"-10\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n line_array = my_output.decode(\"utf-8\").splitlines()\n i = 0\n PREVIOUS_COMMIT_OFFSET = 4\n while i < len(line_array):\n if i + PREVIOUS_COMMIT_OFFSET < len(line_array):\n result.append(\n {\n \"commit\": line_array[i],\n \"author\": line_array[i + 1],\n \"date\": line_array[i + 2],\n \"commit_msg\": line_array[i + 3],\n \"pre_commit\": line_array[i + PREVIOUS_COMMIT_OFFSET],\n }\n )\n else:\n result.append(\n {\n \"commit\": line_array[i],\n \"author\": line_array[i + 1],\n \"date\": line_array[i + 2],\n \"commit_msg\": line_array[i + 3],\n \"pre_commit\": \"\",\n }\n )\n i += PREVIOUS_COMMIT_OFFSET\n return {\"code\": p.returncode, \"commits\": result}\n else:\n return {\"code\": p.returncode, \"message\": my_error.decode(\"utf-8\")}", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", 
flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def get_repo_commits_except_merges(\n owner, repo, query_params=None, session=None,\n):\n return (\n commit for commit in get_repo_commits(\n owner, repo, query_params, session,\n ) if len(commit['parents']) < 2\n )", "def gitLogValue(format,directory):\n return subprocess.check_output([\"git\",\"log\",\"-1\",\"--pretty=format:%\"+format],cwd=directory).strip()", "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n col_commit = self.commit\n else:\n col = self.columns[i]\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n seen_this = True\n self.buf += '*'\n chars_written += 1\n\n if self.num_parents > 2:\n chars_written += self._draw_octopus_merge()\n elif seen_this and self.num_parents > 2:\n self._write_column(col, '\\\\')\n chars_written += 1\n elif seen_this and self.num_parents == 2:\n # This is a 2-way merge commit. There is no\n # GraphState.PRE_COMMIT stage for 2-way merges, so this is the\n # first line of output for this commit. Check to see what the\n # previous line of output was.\n #\n # If it was GraphState.POST_MERGE, the branch line coming into\n # this commit may have been '\\', and not '|' or '/'. If so,\n # output the branch line as '\\' on this line, instead of '|'.\n # This makes the output look nicer.\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n if self.num_parents > 1:\n self._update_state(GraphState.POST_MERGE)\n elif self._is_mapping_correct():\n self._update_state(GraphState.PADDING)\n else:\n self._update_state(GraphState.COLLAPSING)", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and 
line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, 
tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def get_commits_in_branch(branch_name):\n output = subprocess.check_output(\"git log --pretty=format:'{}' {} {}\".format(git_format, branch_name, args.extra_args), shell=True)\n lines = output.decode(\"utf-8\").split(\"\\n\")\n out = []\n for line in lines:\n if len(line) <= 1: break\n [sha, author, message] = line.split(\"\t\", 2)\n out.append((sha, author, message))\n out.reverse()\n return out", "def svn_client_mergeinfo_log_merged(char_path_or_url, svn_opt_revision_t_peg_revision, char_merge_source_path_or_url, svn_opt_revision_t_src_peg_revision, svn_log_entry_receiver_t_receiver, svn_boolean_t_discover_changed_paths, apr_array_header_t_revprops, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def get_unpushed_log(conn):\n cursor = conn.cursor()\n cursor.execute(\"SELECT id, task, start_time, end_time FROM timelogs WHERE pushed = 0\")\n rows = cursor.fetchall()\n return rows", "def update_log(self) -> str:\n\n def iter_fragment_lines(fragment: Fragment) -> Iterator[str]:\n for leaf in fragment.iter_leaves():\n action = \"removed\" if leaf.value == REMOVED else \"loaded\"\n yield action + \" \" + \".\".join(\n str(p) for p in leaf.path\n ) + \" from \" + str(leaf.source)\n\n lines = chain.from_iterable(\n iter_fragment_lines(fragment) for fragment in self._fragments\n )\n result = \"\\n\".join(lines)\n return result", "def 
get_old_log(self):\n return Gumtree.gumtree.getOldLog() + ';'", "def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n self.__project.location).rstrip()", "def _get_cleaned_logs(self, log, logstart, logend):\n start = log.find(logstart) + len(logstart)\n normal_log = log[start:].replace(logend, '')\n if normal_log.strip() != '' or self.session.run_counter == 1:\n return normal_log\n lastlogend = self.LOGEND%(self.session.uuid, self.session.run_counter - 1)\n start = log.find(lastlogend) + len(lastlogend)\n return log[start:].replace(logstart, '').replace(logend, '')", "def __first_commit_date(self):\n return utils.run('git',\n ['log', '--all', '--format=%cI', '--first-parent',\n '--reverse', '--max-parents=0'],\n self.__project.location).splitlines()[0].rstrip()", "def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()", "def get_git_log_command(\n verbose: bool, from_commit: str | None = None, to_commit: str | None = None\n) -> list[str]:\n git_cmd = [\n \"git\",\n \"log\",\n \"--pretty=format:%H %h %cd %s\",\n \"--date=short\",\n ]\n if from_commit and to_commit:\n git_cmd.append(f\"{from_commit}...{to_commit}\")\n elif from_commit:\n git_cmd.append(from_commit)\n git_cmd.extend([\"--\", \".\"])\n if verbose:\n console.print(f\"Command to run: '{' '.join(git_cmd)}'\")\n return git_cmd", "def commits_between(repo_path, start, end):\n \n git = subprocess.Popen([\"git\", \"log\", \"%s..%s\" % (start, end)], stdout=subprocess.PIPE, cwd=repo_path)\n log = git.stdout.read().decode(\"utf-8\")\n \n cur = None\n commits = []\n \n for line in log.splitlines():\n cm = re.match(r'commit ([a-f0-9]{40})', line)\n if cm is not None:\n if cur:\n commits.append(cur)\n cur = Commit(cm.group(1))\n \n if cur is not None and cm is None:\n if cur.message is None:\n if line.startswith(\"Author:\"):\n cur.author = line[len(\"Author: \"):]\n elif line.startswith(\"Date:\"):\n cur.date = line[len(\"Date: \"):]\n else:\n cur.message = \"\"\n else:\n cur.message += line.strip() + \"\\n\"\n \n if cur is not None:\n commits.append(cur)\n \n return commits", "def _get_log_as_str_list(start: str | None, end: str, args: str) -> list[str]:\n delimiter = \"----------commit-delimiter----------\"\n log_format: str = \"%H%n%s%n%an%n%ae%n%b\"\n git_log_cmd = (\n f\"git -c log.showSignature=False log --pretty={log_format}{delimiter} {args}\"\n )\n if start:\n command = f\"{git_log_cmd} {start}..{end}\"\n else:\n command = f\"{git_log_cmd} {end}\"\n c = cmd.run(command)\n if c.return_code != 0:\n raise GitCommandError(c.err)\n if not c.out:\n return []\n return c.out.split(f\"{delimiter}\\n\")", "def unmerged_total(self):\n return int(self.git.rev_list('--count', '{}..{}'.format(self.base_branch, self.topic_branch)))" ]
[ "0.605089", "0.5828341", "0.5793173", "0.5702663", "0.55771744", "0.55349654", "0.5473056", "0.5455185", "0.54213625", "0.53958446", "0.53584856", "0.529324", "0.52550614", "0.5119616", "0.50683516", "0.5061825", "0.50409853", "0.5032785", "0.5018177", "0.5006762", "0.5002617", "0.49939176", "0.49911863", "0.49622124", "0.4960173", "0.49499536", "0.49496478", "0.4946067", "0.49357218", "0.49206614" ]
0.80632204
0
Return number of unmerged commits.
def unmerged_total(self):
    return int(self.git.rev_list('--count', '{}..{}'.format(self.base_branch, self.topic_branch)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_git_commiter_count(path):\n process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n committers = stdout.decode(\"ISO-8859-1\")\n return len(committers.split('\\n'))", "def get_commit_count():\n if COMMIT_COUNT is None:\n return shell_output('git rev-list {base_version}..HEAD --count'\n .format(base_version=get_base_version()))\n return COMMIT_COUNT", "def get_git_commit_count(path):\n process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n number = stdout.decode().strip(\"\\n\")\n return int(number)", "def number_commits_recorded(refenv) -> int:\n return len(list_all_commits(refenv))", "def test_repo_commit_count():\n\n commit_count = BehavioralUtils.count_commits('drupal', 'builds')\n assert commit_count == 4", "def totaled_total_commits(cc, sql_time_specification): # pragma: no cover\n cc.execute(\"\"\"SELECT COUNT(*)\n FROM git_commit\n WHERE %s\"\"\" % sql_time_specification)\n result = cc.fetchone()\n return int(result[0])", "def compute(self):\n\n commit_hashes = {item['hash'] for item in self.items}\n return len(commit_hashes)", "def get_total_commits_per_user_excluding_merges(owner, repo, session):\n contributors = get_repo_contributors(owner, repo, session)\n return {\n contributor['login']: contributor['total']\n for contributor in contributors\n }", "def count_fragments(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_fragments()\n return n", "def count_fragments(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_fragments()\n return n", "def unsaved_changes(self) -> int:\n return (\n abs(self._snapshot_index - self._last_save_snapshot)\n + self._branch_save_count\n )", "def undone_count(self):\n\n return self.task_set.filter(done=False).count()", "def getOpenEditorsCount(self):\n return len(self.editors)", "def get_count(owner, repo_slug, auth_tokens, endpoint):\n count_url = make_req_url(owner, repo_slug, endpoint, 0)\n response = send_bitbucket_request(count_url, auth_tokens)\n if response and 'count' in response:\n return response['count']-1\n return 0", "def message_count(self) -> int:\n return len(self._leased_messages)", "def get_commits(github_id, repo):\r\n\r\n url = 'https://api.github.com/repos/{}/{}/commits'.format(github_id, repo)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n commit_count = 0\r\n\r\n for data in todos:\r\n commit_count += 1\r\n\r\n return commit_count", "def no_locked_budgets(self) -> int:\n count = 0\n for budget in self.budgets.values():\n if budget.locked:\n count += 1\n return count", "def count(self):\n # TODO not implemented yet\n return 0", "def commits_behind_master(self, commit):\n return len(\n self.run(['git', 'rev-list',\n '{}..origin/master'.format(commit)]).splitlines())", "def refspec_count(self):\n\n return C.git_remote_refspec_count(self._remote)", "def report_unmerged(unmerged):\n _report_files('unmerged', unmerged)", "def get_total_commits_per_user(commits):\n return get_total_contributions_per_user(commits, 'author')", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def count(self):\n return len(self._commands)", "def num_tickets_left(self):\r\n return self._contributions_left[1]", "def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n 
# return g.connected_components_number()\n return nx.number_connected_components(g)", "def nBranches(self):\n\n\t\treturn self._nBranches", "def getNumCleanedTiles(self):\n\t\tr = 0\n\t\tfor i in self.tiles:\n\t\t\tif i.isClean(): r += 1\n\t\treturn r", "def count_cop(self, infile):\n n_cop = 0\n dgs_in = self._file_handler.file_to_dg_list(infile)\n for dg in dgs_in:\n if dg.has_cop_deprel():\n n_cop += 1\n return n_cop, len(dgs_in)", "def commit_count(commit_info_dict):\n commit_counts = {}\n for release, commit_dict in commit_info_dict.items():\n commit_counts_per_release = {}\n for user_id, commit_list in commit_dict.items():\n commit_counts_per_release[user_id] = len(commit_list)\n commit_counts[release] = commit_counts_per_release\n return commit_counts" ]
[ "0.6773051", "0.6740454", "0.6568362", "0.6104326", "0.60325164", "0.59609437", "0.5774543", "0.56423247", "0.56212234", "0.56212234", "0.5621026", "0.53949195", "0.5394291", "0.539106", "0.5321197", "0.5314997", "0.5308237", "0.5281387", "0.52706915", "0.52404267", "0.5229894", "0.5229493", "0.52230877", "0.52156895", "0.5211323", "0.5205869", "0.5204543", "0.519667", "0.5188304", "0.51838964" ]
0.75320995
0
Get the current datetime (UTC+0). The accuracy is limited to milliseconds and the remaining microseconds are cleared.
def now() -> datetime:
    now = datetime.now(tz=timezone.utc)
    return now.replace(microsecond=now.microsecond - now.microsecond % 1000)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nowUTC():\n return datetime.datetime.now(pytz.utc)", "def now_utc() -> datetime:\n return datetime.now(timezone.utc)", "def datetime_utc_now() -> datetime:\n return datetime.now(timezone.utc)", "def _get_now():\n return datetime.now(tz=timezone.utc)", "def get_now():\n return dt.datetime.now(dt.timezone.utc)", "def now_datetime():\n return datetime.utcnow().replace(tzinfo=timezone)", "def now():\n return datetime.datetime.now(pytz.utc)", "def datetime_utcnow() -> datetime:\n return datetime.now(tz=pytz.timezone('UTC'))", "def _now():\n return datetime.now(timezone.utc).astimezone()", "def get_utc_now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(\"UTC\"))", "def _now():\n return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)", "def Now():\n ut = (datetime.datetime.utcnow() - _EPOCH).total_seconds() / 86400.0\n return Time(ut)", "def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)", "def utc_now():\n return datetime.now(tz=timezone.utc)", "def utc_now():\n realtime = datetime.utcnow()\n realtime = pytz.utc.localize(realtime)\n return realtime", "def now(cls, tz=None):\n return datetime()", "def now():\n return utcfromtimestamp(time.time())", "def get_now_utc(no_microseconds=True):\n if no_microseconds:\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n microsecond=0\n )\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())", "def get_now_utc(no_microseconds=True):\n if no_microseconds:\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n microsecond=0\n )\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())", "def now_dt(tz='UTC'):\n if tz != 'UTC':\n raise NotImplementedError()\n return datetime.datetime.utcnow().replace(tzinfo = pytz.utc)", "def utcnow() -> datetime.datetime:\n return datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)", "def now(cls):\n return DateTime(*time.localtime())", "def now(self):\n if 'timezone' in self._data:\n return pytz.utc.localize(datetime.datetime.utcnow()).astimezone(pytz.timezone(self._data['timezone']))\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())", "def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)", "def utcnow():\n return datetime.utcnow().replace(tzinfo=UTC)", "def currentUTC():\n return str(datetime.utcnow())", "def get_today_utc():\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n hour=0, minute=0, second=0, microsecond=0\n )", "def get_today_utc():\n return pytz.utc.localize(datetime.datetime.utcnow()).replace(\n hour=0, minute=0, second=0, microsecond=0\n )", "def now (self):\n return datetime.datetime.utcnow ()", "def now():\n return datetime.datetime.utcnow()" ]
[ "0.7628507", "0.75773877", "0.7516025", "0.751259", "0.74952275", "0.73824656", "0.73744327", "0.7368627", "0.73526394", "0.7349973", "0.7315829", "0.7300803", "0.72711504", "0.7261088", "0.7256142", "0.72537535", "0.7203686", "0.7187088", "0.7187088", "0.71670175", "0.71532047", "0.7144613", "0.70751214", "0.7052283", "0.7047307", "0.70445466", "0.7035265", "0.7035265", "0.69935143", "0.6957789" ]
0.82576597
0
Get a datetime (UTC+0) from a given representation.
def from_string(representation: str) -> datetime:
    return parse(representation).replace(tzinfo=timezone.utc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _epoch_utc_to_datetime(epoch_utc):\n return datetime.fromtimestamp(epoch_utc)", "def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)", "def convert_utc(utc) -> dt.datetime:\n return iso8601.parse_date(utc)", "def datetime_utc_epoch_start() -> datetime:\n return timestamp_to_datetime(0)", "def utcdatetime_from_tuple(date):\r\n line = str(date[0])\r\n if date[1] < 10:\r\n line += '0'\r\n line += str(date[1])\r\n if date[2] < 10:\r\n line += '0'\r\n line += str(date[2])\r\n return utcdatetime_from_string(line)", "def fromtimestamp(cls, timestamp):\n return datetime.datetime.utcfromtimestamp(timestamp)", "def to_datetime(value: str | None) -> datetime.datetime | None:\n if value is None:\n return None\n if value.endswith(\"Z\"):\n # Parse and set the timezone as UTC\n o = datetime.datetime.fromisoformat(value[:-1]).replace(\n tzinfo=datetime.timezone.utc\n )\n else:\n o = datetime.datetime.fromisoformat(value)\n if o.tzinfo:\n # Convert any aware datetime to UTC\n return o.astimezone(datetime.timezone.utc)\n return o", "def utcfromtimestamp(cls, t):\n return cls._fromtimestamp(t, True, None)", "def as_utc_datetime(timespec):\n try:\n dt = as_datetime(timespec, tz=REF_TZ)\n utc_dt = dt.astimezone(pytz.UTC)\n return utc_dt\n except Exception:\n raise DatetimeCoercionFailure(timespec=timespec, timezone=pytz.UTC)", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def date_from_utc(date):\n return pytz.utc.localize(date)", "def from_timestamp(timestamp: float, tz_info: tzinfo = UTC) -> datetime:\n utc_dt = datetime.fromtimestamp(timestamp, tz=UTC)\n return convert_timezone(utc_dt, tz_info)", "def make_datetime_from_string(string):\n return datetime.datetime.strptime(string, \"%Y-%m-%dT%H:%M:%S%z\")", "def microseconds_to_datetime(ms):\n return datetime.utcfromtimestamp(ms / 1000000.0).replace(tzinfo=pytz.utc)", "def human_to_utc_datetime(x):\n\n return parsedatetime.Calendar().parseDT(datetimeString=x,\n sourceTime=datetime.utcnow(),\n tzinfo=timezone(\"UTC\"))[0]", "def fromtimestamp(cls, timestamp, tz=None):\n return datetime()", "def utcdatetime(self):\n utc = self.toZone('UTC')\n second = int(utc._second)\n microsec = utc.micros() % 1000000\n dt = datetime(utc._year, utc._month, utc._day, utc._hour,\n utc._minute, second, microsec)\n return dt", "def get_datetime(hours):\n return datetime.datetime.utcfromtimestamp(hours * 60 * 60)", "def datetime_from(text):\n eastern = pytz.timezone(\"US/Eastern\")\n if text.endswith(\"T00:00:00\"):\n text = text[:-len(\"T00:00:00\")]\n time = datetime.strptime(text, \"%Y-%m-%d\")\n time = time.replace(hour=23, minute=59, second=59)\n time = eastern.localize(time)\n return time.astimezone(pytz.utc)", "def date_from_utc_ts(ts):\n return date_from_utc(dt.utcfromtimestamp(ts / 1000.0))", "def _parse_datetime(s):\r\n if s:\r\n return datetime.strptime(s, ISO8601)\r\n else:\r\n return datetime.fromtimestamp(0)", "def _parse_date(s):\n return parse(s).astimezone(pytz.utc)", "def _timestamp_to_datetime(timestamp):\n return datetime.fromtimestamp(timestamp * 0.001, tz=timezone.utc)", "def rfc2822_to_datetime(rfc_date):\n timestamp = mktime_tz(parsedate_tz(rfc_date))\n raw_dt = datetime.datetime.utcfromtimestamp(timestamp)\n return raw_dt.replace(tzinfo=pytz.utc)", "def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = 
datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)", "def hydrate_datetime(seconds, nanoseconds, tz=None):\n minutes, seconds = map(int, divmod(seconds, 60))\n hours, minutes = map(int, divmod(minutes, 60))\n days, hours = map(int, divmod(hours, 24))\n seconds = (1000000000 * seconds + nanoseconds) / 1000000000\n t = DateTime.combine(Date.from_ordinal(unix_epoch_date_ordinal + days), Time(hours, minutes, seconds))\n if tz is None:\n return t\n if isinstance(tz, int):\n tz_offset_minutes, tz_offset_seconds = divmod(tz, 60)\n zone = FixedOffset(tz_offset_minutes)\n else:\n zone = timezone(tz)\n return zone.localize(t)", "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def parse_utc_string(self, utc_string: str) -> dt.datetime:\n return dt.datetime(*map(int, re.split(r\"[^\\d]\", utc_string)[:-1])).timestamp() + self.get_tz_offset() * 60", "def str_to_datetime(str_repr):\n # Allow the caller to be stupid.\n if type(str_repr) == datetime.datetime:\n return str_repr\n if not str_repr:\n return None\n return ciso8601.parse_datetime(str_repr)", "def datetime(s):\n default = datetime(1, 1, 1, 0, 0)\n if s is None or s == \"\":\n d = parse(\"0001-01-01\", default=default)\n else:\n d = parse(s, default=default)\n\n return d" ]
[ "0.6436676", "0.63259554", "0.6290457", "0.6282037", "0.6262281", "0.61762154", "0.61416876", "0.6114463", "0.60666364", "0.5983876", "0.5970409", "0.595779", "0.59017533", "0.58899635", "0.5870173", "0.5864908", "0.5840024", "0.58342457", "0.57807434", "0.57765967", "0.57617706", "0.574623", "0.57203203", "0.5709534", "0.5705521", "0.5693524", "0.56923974", "0.5684676", "0.56809145", "0.5678451" ]
0.7052922
0
Update NLPIR license file if it is out-of-date or missing.
def update_license_file(data_dir):
    license_file = os.path.join(data_dir, LICENSE_FILENAME)
    temp_dir = tempfile.mkdtemp()
    gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)
    try:
        _, headers = urlretrieve(LICENSE_URL, gh_license_filename)
    except IOError as e:
        # Python 2 uses the unhelpful IOError for this. Re-raise as the more
        # appropriate URLError.
        raise URLError(e.strerror)
    with open(gh_license_filename, "rb") as f:
        github_license = f.read()
    try:
        with open(license_file, "rb") as f:
            current_license = f.read()
    except (IOError, OSError):
        current_license = b""
    github_digest = hashlib.sha256(github_license).hexdigest()
    current_digest = hashlib.sha256(current_license).hexdigest()
    if github_digest == current_digest:
        return False
    shutil.copyfile(gh_license_filename, license_file)
    shutil.rmtree(temp_dir, ignore_errors=True)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_frozen_license() -> int:\n srcpath = Path(\"doc/src/license.rst\")\n dstpath = Path(\"cx_Freeze/initscripts/frozen_application_license.txt\")\n try:\n content = srcpath.read_text(encoding=\"utf-8\")\n except OSError:\n print(ERROR1, file=sys.stderr)\n return 1\n content = FROZEN_HEADER + \"\\n\".join(content.splitlines()[1:]) + \"\\n\"\n try:\n dstpath.write_text(content, encoding=\"utf-8\")\n print(dstpath, \"ok\")\n except OSError as io_error:\n print(ERROR2, f\"({io_error}).\", file=sys.stderr)\n return 1\n return 0", "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n self.module.exit_json(msg=\"Import license file Success.\", changed=True, status='success')\n else:\n self.module.fail_json(msg=\"Import license file Fail.Please add 'hw_license_file_path' \"\n \"and make sure it can be read.\",\n changed=True, status='fail')", "def update_license(self, sKey, sUser, sCompany):\n\t\treturn Job(SDK.PrlSrv_UpdateLicense(self.handle, sKey, sUser, sCompany)[0])", "def refresh_license(self) -> PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.REFRESH,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def _update_properties_file(self, lines, filename):\n found_version_line = False\n if filename.endswith('cogent-requirements.txt'):\n for lineno, line in enumerate(lines):\n if 'packages/source/c/cogent' in line:\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n http_base = lines[lineno].rsplit('/',1)[0]\n lines[lineno] = '%s/PyCogent-%s.tgz\\n' % (http_base, self.Version)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def test_release_update_available_NO(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(None, next)", "def test_errata_update_available(self):\n self._uri({\n '%d.%d/maintained/errata%d/all/Packages.gz' % (MAJOR, MINOR, ERRAT + 1): DATA,\n })\n sec = self.u.errata_update_available()\n self.assertEqual(ERRAT + 1, sec)", "def refresh(self):\n self._get_license_details()", "def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))", "def test_a_renew_non_active_license(self):\n self.assertTrue(self.status.is_ready(), \"The license is active, non active state awaited\")\n with self.assertRaisesRegexp(IOError, 'PUT .* HTTP error 4[0-9][0-9]$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, self.end+2*self.ADAY)", "def set_pkg_license_from_file(self, doc, lic):\n self.assert_package_exists()\n if validations.validate_lics_from_file(lic):\n doc.package.licenses_from_files.append(lic)\n return True\n else:\n raise SPDXValueError('Package::LicensesFromFile')", "def releaseLicence(self):\n\t\t\tpulpCPLEX.releaseLicence()", "def add_license(fitsfile, lic):\n try:\n hdulist = pyfits.open(fitsfile, mode=\"update\")\n except:\n print(\"Oops! 
Something's gone wrong :-(\", file=sys.stderr)\n else:\n prihdr = hdulist[0].header\n prihdr[\"LICENSE\"] = liclist[lic][\"name\"]\n prihdr[\"LICVER\"] = liclist[lic][\"ver\"]\n prihdr[\"LICURL\"] = liclist[lic][\"url\"]\n add_comments(prihdr)\n hdulist.close()", "def license(p):\n # Input file\n f = '/'.join([p, 'collector.stats'])\n check_path(f)\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n for line in fh.readlines():\n if 'License key' in line:\n license = line.split(':')[1].strip()\n break\n\n return license", "def hacking_has_correct_license(physical_line, filename, lines, line_number):\n # don't work about init files for now\n # skip files that are < 10 lines, which isn't enough for a license to fit\n # this allows us to handle empty files, as well as not fail on the Okay\n # doctests.\n if _project_is_apache() and not line_number > 1 and len(lines) > 10:\n for idx, line in enumerate(lines):\n # if it's more than 10 characters in, it's probably not in the\n # header\n if (0 < line.find('Licensed under the Apache License') < 10\n and not _check_for_exact_apache(idx, lines)):\n return (idx, \"H103: Header does not match Apache 2.0 \"\n \"License notice\")", "def check_auto_update(self):\n\n # pylint: disable=W0201\n\n if self.filename is None:\n return\n try:\n filename = self.filename\n timestamp = os.stat(self.filename).st_mtime\n if self.timestamp is None or self.timestamp < timestamp:\n logger.debug(\"Updating %s, timestamp %s\",\n filename, rpki.sundial.datetime.fromtimestamp(timestamp))\n f = open(filename, \"rb\")\n value = f.read()\n f.close()\n self.clear()\n if looks_like_PEM(value):\n self._set_PEM(value)\n else:\n self.DER = value\n self.filename = filename\n self.timestamp = timestamp\n except (IOError, OSError), e:\n now = rpki.sundial.now()\n if self.lastfail is None or now > self.lastfail + self.failure_threshold:\n logger.warning(\"Could not auto_update %r (last failure %s): %s\", self, self.lastfail, e)\n self.lastfail = now\n else:\n self.lastfail = None", "def test_release_update_available_CURRENT(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n self.assertRaises(U.RequiredComponentError, self.u.release_update_available, errorsto='exception')", "def licensify(command_line_args):\n with open(command_line_args.license) as fp:\n license_header = fp.read()\n files = [\n path.join(dirname, f)\n for dirname, _, filenames in walk(command_line_args.directory)\n for f in fnmatch.filter(filenames, command_line_args.files)\n if not (command_line_args.exclude and fnmatch.fnmatch(f, command_line_args.exclude))\n ]\n try:\n result = apply_license_header(\n license_header, files,\n command_line_args.check, command_line_args.dry_run or command_line_args.check\n )\n except LicensesOutOfDateError as error:\n stdout.write(repr(error))\n exit(1)\n if result:\n message = 'The following files have been changed: {}'.format(', '.join(result))\n else:\n message = 'No files changed'\n stdout.write(message + linesep)", "def license(new_key):\n if new_key is not None:\n # click.echo('Saving key to configuration')\n config.set_license(new_key)\n license_key = config.get_license()\n if license_key:\n click.echo(license_key)\n else:\n click.echo(\"No license found: Use --set to configure the key\")", "def apply_pending_updates_if_available(self):\n if 
self.path_exists(self._module) and 'next' in os.listdir(self._module):\n if '.version' in os.listdir(self.get_module_and_path('next')):\n pending_update_version = self.get_version(self.get_module_and_path('next'))\n print('Pending update found: ', pending_update_version)\n if self.path_exists(self.get_module_and_path(self._main_dir)):\n self.rmtree(self.get_module_and_path(self._main_dir)) # Remove the 'main' directory and contents.\n os.rename(self.get_module_and_path('next'), self.get_module_and_path(self._main_dir)) # Move the 'next' to 'main'\n print('Update applied (', pending_update_version, '), ready to rock and roll')\n else:\n print('Corrupt pending update found, discarding...')\n self.rmtree(self.get_module_and_path('next'))\n else:\n print('No pending update found')", "def download_updates_if_available(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n latest_version = self.get_latest_version()\n\n print('Checking version... ')\n print('\\tCurrent version: ', current_version)\n print('\\tLatest version: ', latest_version)\n\n if not latest_version:\n return False\n\n if (not current_version) or (latest_version > current_version):\n print('Updating...')\n if not self.path_exists(self._module):\n os.mkdir(self._module)\n\n # Check if there's a botched download already. If next directory already exists remove it and tree.\n if self.path_exists(self.get_module_and_path('next')):\n self.rmtree(self.get_module_and_path('next')) # Remove the 'next' directory and contents.\n\n # Create the next directory and download the source files.\n os.mkdir(self.get_module_and_path('next'))\n self.download_all_files(self._github_repo + '/contents/' + self._main_dir, latest_version)\n\n # Last step is to write the .version file only if we have completed the download\n with open(self.get_module_and_path('next/.version'), 'w') as versionfile:\n versionfile.write(latest_version)\n versionfile.close()\n\n return True\n return False", "def licensecleanup(): # 3\n res = _msk.Env.licensecleanup()\n if res != 0:\n raise Error(rescode(res),\"\")", "def update(self, purge = True): # need to complete\n\t\tif purge:\n\t\t\tself.__download()\n\t\t\tself.__write()\n\t\telse:\n\t\t\tpass", "def update(self, gppkg_filename):\n run_command(\"gppkg --update %s\" % gppkg_filename)\n self.assertTrue(self.check_install(gppkg_filename))", "def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def __upgrade_install__(path, release):\n install = Popen([\"freebsd-update\", \"-b\", path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(path), \"-r\",\n release, \"install\"], stderr=PIPE)\n install.communicate()\n\n return install.returncode", "def set_concluded_license(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_conc_lics_set:\n self.file_conc_lics_set = True\n if validations.validate_lics_conc(lic):\n self.file(doc).conc_lics = lic\n return True\n else:\n raise SPDXValueError('File::ConcludedLicense')\n else:\n raise CardinalityError('File::ConcludedLicense')\n else:\n raise OrderError('File::ConcludedLicense')", "def set_file_license_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_license_comment_set:\n self.file_license_comment_set = True\n 
if validations.validate_file_lics_comment(text):\n self.file(doc).license_comment = str_from_text(text)\n else:\n raise SPDXValueError('File::LicenseComment')\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')", "def _update_pyrex_file(self, lines, filename):\n found_version_line = False\n for lineno, line in enumerate(lines):\n if line.startswith('__version__'):\n found_version_line = True\n break\n if found_version_line:\n if self.Verbose:\n print 'Version string found on line %d' % lineno\n lines[lineno] = '__version__ = \"%s\"\\n' % str(self.VersionTuple)\n else:\n print \"No version string found in %s\" % filename\n return (lines, found_version_line)", "def set_file_license_in_file(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if validations.validate_file_lics_in_file(lic):\n self.file(doc).add_lics(lic)\n return True\n else:\n raise SPDXValueError('File::LicenseInFile')\n else:\n raise OrderError('File::LicenseInFile')" ]
[ "0.61703426", "0.5742456", "0.5672786", "0.5615723", "0.5595802", "0.55362254", "0.54463863", "0.5431519", "0.5422802", "0.53913784", "0.53868955", "0.53640527", "0.5349856", "0.5262421", "0.5235656", "0.52349484", "0.5227492", "0.5205454", "0.51876825", "0.51652694", "0.5161832", "0.5152928", "0.5151355", "0.51415896", "0.51414764", "0.5139386", "0.51335025", "0.5126989", "0.5125119", "0.5105134" ]
0.63965
0
Find rhombus from a contour. If the shape is not found, return None and an unidentified-shape string.
def detect_rhombus(approx):
    max_length_diff = Rhombuses.get_max_length_diff_in_quad(approx)
    if max_length_diff > Rhombuses.MAX_SIDE_LENGTH_DIFF:
        return None, Shapes.UNIDENTIFIED_SHAPE
    return approx, Shapes.RHOMBUS_SHAPE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_shape(contour):\n # Initialize the shape name and approximate the contour\n shape = \"unidentified\"\n peri = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.04 * peri, True)\n if len(approx) == 3:\n shape = \"triangle\"\n if len(approx) == 4:\n # compute the bounding box of the contour and use the\n # bounding box to compute the aspect ratio\n (x, y, w, h) = cv2.boundingRect(approx)\n ar = w / float(h)\n # a square will have an aspect ratio that is approximately\n # equal to one, otherwise, the shape is a rectangle\n shape = \"square\" if 0.95 <= ar <= 1.05 else \"rectangle\"\n elif len(approx) == 5:\n shape = \"pentagon\"\n else:\n shape = \"circle\"\n return shape", "def detectShape(c):\n shape = \"unidentified\"\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.04 * peri, True)\n\n # if the shape is a triangle, it will have 3 vertices\n if len(approx) == 3:\n shape = \"triangle\"\n \n # if the shape has 4 vertices, it is either a square or\n # a rectangle\n elif len(approx) == 4:\n # compute the bounding box of the contour and use the\n # bounding box to compute the aspect ratio\n (x, y, w, h) = cv2.boundingRect(approx)\n ar = w / float(h)\n \n # a square will have an aspect ratio that is approximately\n # equal to one, otherwise, the shape is a rectangle\n shape = \"square\" if (ar == 0.95 and ar <= 1.05) else \"rectangle\"\n \n # if the shape is a pentagon, it will have 5 vertices\n elif len(approx) == 5:\n shape = \"pentagon\"\n \n # otherwise, we assume the shape is a circle\n else:\n shape = \"circle\"\n \n # return the name of the shape\n return shape", "def detect(self, contour):\n # Initialize the shape name and get the shape perimeter.\n shape = \"unidentified\"\n perimeter = cv2.arcLength(contour, True)\n\n # Approximate the contour to 'smooth' the shape. 
Perimeter of appoximation can be up to 4% different.\n approx = cv2.approxPolyDP(contour, 0.03 * perimeter, True)\n\n # len() will give the number of vertices of the shape.\n if len(approx) == 3:\n shape = \"triangle\"\n\n # Check if the sides are all equal for special case of square.\n elif len(approx) == 4:\n # Compute the bounding box of the contour and use the bounding box to compute the aspect ratio.\n (x, y, w, h) = cv2.boundingRect(approx)\n aspect_ratio = w / float(h)\n # A square will have an aspect ratio that is close to 1, otherwise, the shape is a rectangle.\n shape = \"square\" if aspect_ratio >= 0.95 and aspect_ratio <= 1.05 else \"rectangle\"\n\n elif len(approx) == 5:\n shape = \"pentagon\"\n\n # Otherwise assume the shape is a circle.\n else:\n shape = \"circle\"\n\n return shape", "def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width", "def locate_shape(shape):", "def getCharacterContour(space, debug=False):\r\n\r\n contours = cv2.findContours(space, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n contours = imutils.grab_contours(contours)\r\n\r\n # Sort contours by area, descending\r\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\r\n\r\n totalArea = space.shape[0] * space.shape[1]\r\n\r\n for contour in contours:\r\n\r\n area = cv2.contourArea(contour)\r\n hull = cv2.convexHull(contour)\r\n hull_area = cv2.contourArea(hull)\r\n\r\n if debug:\r\n cv2.drawContours(space, contours, 0, (128, 255, 60), 2)\r\n cv2.imshow('space', space)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n if cv2.contourArea(cv2.convexHull(contour)) < totalArea * 0.05:\r\n break\r\n\r\n if cv2.contourArea(cv2.convexHull(contour)) < totalArea * 0.95:\r\n return contour\r\n\r\n return None", "def get_building_contour(current_building_mask):\n ret, threshed = cv.threshold(current_building_mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n current_building_contour, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n return current_building_contour, hierarchy", "def find_huc(source, shape, in_crs, hint, shrink_factor=1.e-5):\n def _in_huc(shply, huc_shply):\n \"\"\"Checks whether shp is in HUC\"\"\"\n if huc_shply.contains(shply):\n return 2\n elif huc_shply.intersects(shply):\n return 1\n else:\n return 0\n\n def _find_huc(source, shply, crs, hint):\n \"\"\"Searches in hint to find shp.\"\"\"\n logging.debug('searching: %s'%hint)\n hint_level = len(hint)\n search_level = hint_level + 2\n if search_level > source.lowest_level:\n return hint\n\n _, subhus = get_hucs(source, hint, search_level, crs)\n \n for subhu in subhus:\n inhuc = _in_huc(shply, subhu)\n\n if inhuc == 2:\n # fully contained in try_huc, recurse\n hname = workflow.sources.utils.get_code(subhu, search_level)\n logging.debug(' subhuc: %s contains'%hname)\n return _find_huc(source, shply, crs, hname)\n elif inhuc == 1:\n hname = workflow.sources.utils.get_code(subhu, search_level)\n logging.debug(' subhuc: %s partially contains'%hname)\n # partially contained in try_huc, return this\n return hint\n else:\n hname = workflow.sources.utils.get_code(subhu, search_level)\n logging.debug(' subhuc: %s does not contain'%hname)\n assert(False)\n\n if type(shape) is shapely.geometry.Polygon:\n shply = shape\n else:\n shply = workflow.utils.shply(shape)\n\n # must shrink the poly a bit in case it is close to or on a boundary\n radius = 
np.sqrt(shply.area/np.pi)\n shply_s = shply.buffer(-shrink_factor*radius)\n\n hint = workflow.sources.utils.huc_str(hint)\n\n _, hint_hu = get_huc(source, hint, in_crs)\n inhuc = _in_huc(shply_s, hint_hu)\n if inhuc != 2:\n raise RuntimeError(\"{}: shape not found in hinted HUC '{}'\".format(source.name, hint))\n\n result = _find_huc(source, shply_s, in_crs, hint)\n return result", "def __draw_rhombus(img, rhombus):\n for i, point in enumerate(rhombus):\n p1 = tuple(rhombus[i][0])\n p2 = tuple(rhombus[(i+1) % 4][0])\n cv2.line(img, p1, p2, color=(29, 131, 255), thickness=2)\n return img", "def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)", "def harris_corner_detector(img, image_name):\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n horizontal_sobel_filter = np.array([[1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]])\n\n vertical_sobel_filter = np.array([[-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]])\n\n # Compute edge strength of pixels in image\n gx = MyConvolve(img, horizontal_sobel_filter) \n gy = MyConvolve(img, vertical_sobel_filter)\n\n # Compute product of derivatives\n I_xx = gx * gx\n I_xy = gx * gy\n I_yy = gy * gy\n\n # Define Gaussian kernel\n kernel = gauss_kernels(3)\n \n # Convolve product of derivatives (I_xx, I_xy, I_yy)\n W_xx = MyConvolve(I_xx, kernel)\n W_xy = MyConvolve(I_xy, kernel)\n W_yy = MyConvolve(I_yy, kernel)\n\n # Initialise response matrix\n response = np.zeros(img.shape)\n\n # Compute response for pixels that will be taken into consideration\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n w = np.array([[W_xx[x, y], W_xy[x, y]],\n [W_xy[x, y], W_yy[x, y]]])\n det_W = np.linalg.det(w)\n trace_W = np.trace(w)\n response[x, y] = det_W - 0.06 * trace_W * trace_W\n\n # Get max response from response matrix \n max_r = np.max(response) \n\n # Initialise lists for x, y coordinates\n x_list = []\n y_list = []\n\n # Select response values within 10% of maximum response\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n if response[x, y] >= (max_r * 0.1) and response[x, y] <= (max_r * 1.9):\n x_list.append(x)\n y_list.append(y)\n\n # Plot selected response values on image\n plt.figure()\n plt.imshow(img, cmap='gray')\n plt.scatter(y_list, x_list, edgecolors='blue', facecolors='none', s=81, marker='s')\n plt.savefig(image_name + '_corners.jpg')", "def drawShapes(contours, realImg, minArea=500, name=False):\n\n # Getting Shape of Real Image\n realH, realW, _ = realImg.shape\n\n thickness = realW // 275 # Setting Thickness\n fontScale = realH / 1000 # Setting fontScale\n\n if not contours:\n print(\"No Contours Found\")\n for cnt in contours:\n cntArea = cv2.contourArea(cnt) # Getting Area\n # i = -1\n # i += 1\n # color = (0, 255, 0)\n\n if cntArea < minArea:\n continue\n\n # Drawing contours\n # cv2.drawContours(realImg, [cnt], i, color, 3)\n\n # Finding Perimeters of each cnt\n peri = cv2.arcLength(cnt, True) # True for Closed Shapes\n approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)\n\n # Getting x, y co-ordinates and width, height of each cnt\n x, y, w, h = cv2.boundingRect(approx)\n\n # Making Black Rectangle around each contour\n cv2.rectangle(realImg,\n (x - thickness, y - thickness),\n (x + w + thickness, y + h + thickness),\n (0, 0, 0), thickness)\n\n # Making White Rectangle around each contour\n cv2.rectangle(realImg, (x, y), (x + w, y + h),\n (255, 255, 255), 
thickness // 2)\n\n if not name:\n continue\n\n corners = len(approx) # Counting Corners\n objType = None\n\n if corners == 3:\n objType = \"Triangle\"\n\n elif corners == 4:\n aspRatio = w / float(h)\n if aspRatio > 0.9 and aspRatio < 1.1:\n objType = \"Square\"\n else:\n objType = \"Rectangle\"\n\n elif corners == 5:\n objType = \"Pentagon\"\n\n elif corners == 6:\n objType = \"Hexagon\"\n\n elif corners > 7:\n if detectObject.isCircle(cnt, realImg):\n objType = \"Circle\"\n\n if objType is None:\n objType = \"Not Found\"\n\n if thickness == 1:\n thickness = 2\n\n # Setting FontFace\n fontFace = cv2.FONT_HERSHEY_DUPLEX\n\n # Getting Shape of Image\n h2, _, _ = realImg.shape\n\n # If Image Height is too Small, increase fontScale\n if h2 < 300:\n fontScale += 0.04\n elif h2 < 400:\n fontScale += 0.02\n\n textBgHeight = int(fontScale * 40)\n k = 0\n \n if h2 > 500:\n k = textBgHeight // 3\n\n # Copying the contents where we want to write text\n sub_img = realImg[y+h: y+h+textBgHeight+k, x:x+w]\n\n # Create a new black image with same shape\n rect = np.zeros(sub_img.shape, dtype=np.uint8)\n\n # Creating a new transparent image from\n # `copied image(sub_img)` and `black image`\n res = cv2.addWeighted(sub_img, .25, rect, .5, 1.0)\n\n # Replacing the part of realImg\n realImg[y+h: y+h+textBgHeight+k, x: x+w] = res\n\n # Putting White Text on Image\n cv2.putText(realImg, objType, (x, y + h + textBgHeight - 5),\n fontFace, fontScale, (255, 255, 255), thickness * 2)\n\n # # Putting Black Text on Image\n cv2.putText(realImg, objType, (x, y + h + textBgHeight - 5),\n fontFace, fontScale, (0, 0, 0), thickness // 2)", "def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)", "def find_squares( contours, debug=False ):\r\n #=================================================================\r\n # The Minimum and Maximum rations for width vs height for the goal\r\n # based on experimental results goal is approx 1.5:1\r\n #=================================================================\r\n MIN_RATIO = 1.3\r\n MAX_RATIO = 1.8\r\n ret = []\r\n\r\n for shape in contours:\r\n x, y, w, h = cv2.boundingRect( shape )\r\n w_h_ratio = float( w ) / float( h )\r\n if debug:\r\n print \"Area\", (w * h)\r\n print \"Width \", w\r\n print \"Height\", h\r\n if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:\r\n ret.append( shape )\r\n\r\n return( ret )", "def detect(self, c):\n # Compute perimeter of the contour\n perimeter = cv2.arcLength(c, True)\n\n # Approximate the contour curve\n # in order to obtain approximate\n # number of vertices given by the\n # intersection of the short lines\n approximate = cv2.approxPolyDP(c, 0.04 * perimeter, True)\n\n # Return blob's relative\n # target class based on\n # number of approximated\n # vertices\n if len(approximate) == 3:\n return \"triangle\"\n\n elif len(approximate) == 4:\n return \"square\"\n\n else:\n return \"circle\"", "def test_rhombus_area(self):\n self.assertEqual(17.5, rhombus_area(\n self.values['diagonal_1'], self.values['diagonal_2']))", "def get_dimensions_from_contour(img, cntr, kernel):\n\tmask = np.zeros_like(img) # mask will contain the fitted and adjusted ellipse of a single obstacle\n\tellipse = cv2.fitEllipse(cntr)\n\tx, y, obj_length, obj_height = cv2.boundingRect(cntr)\n\trect = cv2.minAreaRect(cntr)\n\n\tequi_diameter = obj_length # bounding rectangle gives a better approximation of diameter\n\n\tbox = cv2.boxPoints(rect)\n\tbox = np.int0(box)\n\tmask = cv2.ellipse(mask, ellipse, (255, 255, 255), -1) # draw the fitted 
ellipse\n\trows = mask.shape[0]\n\tcols = mask.shape[1]\n\tM = np.float32([[1, 0, 0], [0, 1, equi_diameter / 4]]) # shift mask down to match obstacle, not edge\n\tmask = cv2.warpAffine(mask, M, (cols, rows))\n\tmask = cv2.erode(mask, kernel, iterations=3) # erode the mask to remove background points\n\treturn mask, box, x, y, obj_length, obj_height", "def make_mask(shape, contour):\n mask = np.zeros(shape, np.int32)\n cv2.drawContours(mask, [contour], 0, (255), -1)\n return mask", "def test_get_quad_rhombus_all_int(self):\n result = get_quadrilateral_type(3, 3, 3, 3, 44, 136, 44, 136)\n self.assertEqual(result, 'rhombus')", "def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. 
\r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic", "def detecting_characters(contour, image_print, number_file):\n character = []\n image = image_print.copy()\n widths = [17, 10]\n\n for w in widths:\n low_limit, high_limit, max_aspect, min_aspect, whole_area = estimation_area(image_print, w, 35)\n for c in contour:\n x, y, w, h = cv2.boundingRect(c)\n area_contour = w * h\n aspect_ratio = w / h\n\n if (area_contour/whole_area >= low_limit) and (area_contour/whole_area <= high_limit) and (aspect_ratio < max_aspect) and (aspect_ratio > min_aspect):\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2) # DRAWING THE PLATE'S RECTANGLE\n\n\n # print(w, h)\n # cv2.imshow('Drawing', image)\n # cv2.waitKey(0)\n\n rectangle_char = (x, y, w, h) # x = UPPER - LEFT CORNER OF THESE RECTANGLE\n character.append(rectangle_char) # FILLING THE CHARACTER VARIABLE.\n\n\n image_plate_char = image\n\n return image_plate_char, character", "def get_horizontal_atmos_resolution(description):\n if description == \"none\":\n nh = \"0\"\n lats = \"0\"\n elif re.search(\" icosahedral-hexagonal\", description):\n match = re.search(r\"(?P<nh>\\d+)-point\\sicosahedral-hexagonal\", description)\n nh = match.groupdict()[\"nh\"]\n lats = 0\n\n elif re.search(r\"\\s?(?P<d1>\\d+)[^.]\\s?x\\s?(?P<d2>\\d+)[^.]x\\s?(?P<d3>\\d+)[^.]^cubeface\", description):\n match = re.search(r\"\\s?(?P<d1>\\d+)[^.]\\s?x\\s?(?P<d2>\\d+)[^.]x\\s?(?P<d3>\\d+)[^.]\", description)\n nh = str(int(match.groupdict()[\"d1\"]) * int(match.groupdict()[\"d2\"]) * int(match.groupdict()[\"d3\"]))\n lats = None\n elif re.search(r\"\\d+[^.]\\s?x\\s?\\d+[^.]longitude\\/latitude;\",description):\n match = re.search(r\"(?P<lons>\\d+)[^.]\\s?x\\s?(?P<lats>\\d+)[^.]longitude\\/latitude\",description)\n nh = str(int(match.groupdict()[\"lons\"]) * int(match.groupdict()[\"lats\"]))\n lats = match.groupdict()[\"lats\"]\n elif re.search(r\"\\s?x\\s?\", description):\n match = re.search(r\"\\s?(?P<lons>\\d+)[^.]\\s?x\\s?(?P<lats>\\d+)[^.]\", description)\n nh = str(int(match.groupdict()[\"lons\"]) * int(match.groupdict()[\"lats\"]))\n lats = 
match.groupdict()[\"lats\"]\n elif re.search(\"grid points in total\", description):\n match = re.search(r\"(?P<nh>\\d+)\\s?grid points in total\", description)\n nh = match.groupdict()[\"nh\"]\n lats = None\n elif re.search(\"cells\", description):\n match = re.search(r\";\\s?(?P<nh>\\d+,?\\d{3}?,?(\\d{3})?).*cells\", description)\n if not match:\n match = re.search(r\"with\\s?(?P<nh>\\d+,?\\d{3}?,?(\\d{3})?).*cells\", description)\n\n nh = match.groupdict()[\"nh\"]\n if \",\" in nh:\n nh = nh.replace(\",\", \"\")\n lats = None\n else:\n nh = \"64800\"\n lats = None\n\n return nh, lats", "def __CalculateConvexHull(self, contour):\r\n return cv2.convexHull(contour)", "def get_shouldering(binary_mask):\n shoulder_dict = get_shoulders(binary_mask)\n\n top_y_min = shoulder_dict[\"top_y_min\"]\n top_y_max = shoulder_dict[\"top_y_max\"]\n top_x_min = shoulder_dict[\"top_x_min\"]\n top_x_max = shoulder_dict[\"top_x_max\"]\n\n bottom_y_min = shoulder_dict[\"bottom_y_min\"]\n bottom_y_max = shoulder_dict[\"bottom_y_max\"]\n bottom_x_min = shoulder_dict[\"bottom_x_min\"]\n bottom_x_max = shoulder_dict[\"bottom_x_max\"]\n\n top_shoulder = binary_mask[top_y_min:top_y_max, top_x_min:top_x_max]\n bottom_shoulder = binary_mask[bottom_y_min:bottom_y_max, bottom_x_min:bottom_x_max]\n\n white_top = get_biomass(top_shoulder)\n black_top = (top_shoulder.shape[0] * top_shoulder.shape[1]) - white_top\n\n white_bottom = get_biomass(bottom_shoulder)\n black_bottom = (bottom_shoulder.shape[0] * bottom_shoulder.shape[1]) - white_bottom\n\n return black_top, black_bottom", "def get_digit(cell, border_size=5):\n gray = cv.cvtColor(cell,cv.COLOR_BGR2GRAY)\n thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]\n thresh = clear_border(thresh)\n\n cnts, hierarchy = cv.findContours(thresh.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key=cv.contourArea, reverse=True)\n\n if len(cnts) > 0:\n cnt = cnts[0]\n outline = cell.copy()\n digit = np.array([[[cnt[:,:,0].min()-border_size, cnt[:,:,1].min()-border_size]], [[cnt[:,:,0].max()+border_size, cnt[:,:,1].min()-border_size]], [[cnt[:,:,0].min()-border_size, cnt[:,:,1].max()+border_size]], [[cnt[:,:,0].max()+border_size, cnt[:,:,1].max()+border_size]]])\n cv.drawContours(outline, digit, -1, (0,255,0), 3)\n corners = digit.sum(1)\n zoom = transform(thresh, corners)\n height, width = zoom.shape\n border_top, border_bottom, border_left, border_right = 0,0,0,0\n if height > width:\n border_right = int(np.round(((height - width) / 2) - 0.1))\n border_left = int(np.round(((height - width) / 2) + 0.1))\n elif width > height:\n border_top = int(np.round(((width - height) / 2) - 0.1))\n border_bottom = int(np.round(((width - height) / 2) + 0.1))\n final = cv.copyMakeBorder(zoom, border_top, border_bottom, border_left, border_right, borderType=cv.BORDER_CONSTANT, value=0)\n \n else:\n final = thresh\n \n return final", "def get_contour_bbox_from_rle(rle, width, height, return_mask=True,):\n mask = rle_to_mask(rle, height, width).copy()\n cnts = grab_contours(\n cv2.findContours(\n mask, \n cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_SIMPLE\n ))\n x,y,w,h = cv2.boundingRect(cnts[0])\n \n if return_mask:\n return (x,y,x+w,y+h), mask\n else:\n return (x,y,x+w,y+h)", "def goodPlace(contour):\n perimeter = cv2.arcLength(contour, True)\n x, y, w, h = cv2.boundingRect(contour)\n if y<=0 or x<=0:\n return False\n elif y+h >=2016 or x+w>=3840:\n return False\n return True", "def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, 
np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]", "def harris_corner_detector(im):\n\n conv_vec = np.array([[1, 0, -1]])\n\n x_der = convolve(im, conv_vec)\n y_der = convolve(im, conv_vec.transpose())\n\n x_der_2 = sol4_utils.blur_spatial(x_der * x_der, 3)\n\n y_der_2 = sol4_utils.blur_spatial(y_der*y_der, 3)\n x_y_der = sol4_utils.blur_spatial(x_der*y_der, 3)\n y_x_der = sol4_utils.blur_spatial(y_der*x_der, 3)\n response = (x_der_2 * y_der_2 - x_y_der * y_x_der) - K * (x_der_2 + y_der_2) ** 2\n bool_response = non_maximum_suppression(response)\n coor_arr = np.where(bool_response)\n coor_arr = [coor_arr[1], coor_arr[0]]\n coor_arr = np.column_stack(coor_arr)\n\n return coor_arr", "def find_buoy(img, display_results=False):\n\n greyscale_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_GRAY2BGR)\n cm_image = cv2.applyColorMap(greyscale_image, cv2.COLORMAP_VIRIDIS)\n\n cm_copy_image = cm_image\n cv2.copyTo(cm_image, cm_copy_image)\n cm_image = cv2.medianBlur(cm_image, 5) # Removes salt and pepper noise\n\n mask = mask_sonar_image(cm_image, display_results)\n\n cm_circs = cv2.findContours(mask, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n cm_circs = list(filter(lambda x: (cv2.contourArea(x) > 250), cm_circs))\n\n cm_circs = sorted(cm_circs, key=lambda x: (cv2.arcLength(x, True)**2/(\n 4*math.pi*cv2.contourArea(x))), reverse=False)\n\n filtered_circles = cm_circs[0:1]\n\n circle_positions = []\n for circle in filtered_circles: # Find center of circle code\n M = cv2.moments(circle)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n circle_positions.append((cX, cY, (cv2.arcLength(circle, True)**2/(\n 4*math.pi*cv2.contourArea(circle))),\n cv2.contourArea(circle)))\n\n if display_results:\n cv2.drawContours(cm_copy_image, filtered_circles, -1, (0, 255, 0), 2)\n cv2.imshow(\"found_buoys\", cm_copy_image)\n cv2.waitKey(0)\n\n return circle_positions" ]
[ "0.6477798", "0.6227886", "0.61438435", "0.5769389", "0.55741864", "0.55740094", "0.5542627", "0.54590064", "0.5348797", "0.5329123", "0.52930325", "0.52622515", "0.52615017", "0.5233085", "0.5198105", "0.5196476", "0.51726085", "0.51662135", "0.51260275", "0.51097596", "0.50919193", "0.5085527", "0.50803405", "0.50799507", "0.50607365", "0.5050143", "0.5047555", "0.50223494", "0.5016435", "0.5005341" ]
0.66473997
0
Draw the idx-th rhombus on the image.
def __draw_rhombus(img, rhombus):
    for i, point in enumerate(rhombus):
        p1 = tuple(rhombus[i][0])
        p2 = tuple(rhombus[(i+1) % 4][0])
        cv2.line(img, p1, p2, color=(29, 131, 255), thickness=2)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, img, idx=None):\n if idx is None:\n for rhombus in self.rhombuses:\n img = self.__draw_rhombus(img, rhombus)\n else:\n img = self.__draw_rhombus(img, self.rhombuses[idx])\n\n return img", "def draw_rhombus(self, screen):\n pygame.gfxdraw.filled_polygon(screen, self.list_of_coordinates, self.color)\n\n return screen", "def footprint_corner_indices():", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def draw_heaters(ax, windtunnel):\n draw_heater(ax, windtunnel.heater_l)\n draw_heater(ax, windtunnel.heater_r)", "def gen_rhombus(width):\n for row in range(1, width +1, 2):\n yield f\"{(STAR * row).center(width)}\"\n\n for row in range(width -2, 0, -2):\n yield f\"{(STAR * row).center(width)}\"", "def draw_rh_lines(data):\n #hnd = extract_right_hand(data);\n hnd = np.array(data['crop']);\n hand.draw_hand_lines(hnd,data['rhkpss'][data['i']]);\n return hnd;", "def phantom_rectangles(n_points,R):\n \n \n #Rescaling according to image size \n R[:,0] = R[:,0]*n_points/2\n R[:,1] = R[:,1]*n_points/2\n R[:,2] = R[:,2]*n_points/2\n R[:,3] = R[:,3]*n_points/2\n R[:,4] = R[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = R.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sui rettangoli\n x_new = x - R[k,0]\n y_new = y - R[k,1]\n\n u = abs(x_new*math.cos(R[k,4])+y_new*math.sin(R[k,4]))\n v = abs(-x_new*math.sin(R[k,4])+y_new*math.cos(R[k,4]))\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (u[i,j] < R[k,2]/2 and v[i,j] < R[k,3]/2):\n phantom1[i,j,k] = R[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def __init__(self, w, h):\n self.w = w\n self.h = h\n self.size = self.w*self.h\n self.data = [IColor() for x in range(self.size)]\n self.temp = [IColor() for x in range(self.size)]", "def drawHarrisSubPixel(img, blockSize=2, ksize=3, k=0.04, color1=(0,0,255), color2=(0,255,0)):\n\ttmp = img.copy()\n\tgray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)\n\tgray = np.float32(gray)\n\tdst = cv2.cornerHarris(gray, blockSize, ksize, k)\n\tdst = cv2.dilate(dst, None)\n\tret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)\n\tdst = np.uint8(dst)\n\tret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n\tcriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\n\tcorners = cv2.cornerSubPix(gray, np.float32(centroids), (5,5), (-1,-1), criteria)\n\tres = np.hstack((centroids, corners))\n\tres = np.int0(res)\n\ttmp[res[:,1], res[:,0]] = color1\n\ttmp[res[:,3], res[:,2]] = color2\n\treturn tmp", "def Hexagon(image):\n return x, y", "def draw_nonogram(self):\n image = Image.new(\"RGB\", (self.nonogram_size * 50, self.nonogram_size * 50), (255, 255, 255))\n draw = ImageDraw.Draw(image)\n\n for index, square in enumerate(reduce(lambda x, y: x+y, self.grid), 0):\n\n #print(square)\n x = index % self.nonogram_size\n y = index // self.nonogram_size\n coord = [(x * 50, y * 50), ((x + 1) * 50, (y + 1) * 50)]\n if square == EMPTY:\n draw.rectangle(coord, fill=(255, 255, 255))\n if square == FILLED:\n draw.rectangle(coord, fill=(0, 0, 0))\n return image", "def hilbertPlot(f):\n order = getOrder(f)\n img = np.zeros(shape=(2 ** order, 2 ** order))\n #for d in range(len(f)):\n for d in 
range((2 ** order) ** 2):\n x, y = d2xy(2 ** order, d)\n img[x, y] = f[d % len(f)]\n return img", "def demo(self):\n self.clear()\n\n white = neo.Color(255, 255, 255)\n black = neo.Color(0, 0, 0)\n red = neo.Color(120, 0, 0)\n green = neo.Color(0, 255, 0)\n blue = neo.Color(0, 0, 255)\n pink = neo.Color(255, 102, 178)\n \n state = [[[0,0,0]] * self.width] * self.height\n stepsize = (1.0/self.n_leds)\n lednr = 0\n for x in range(self.width):\n for y in range(self.height):\n h_start = (0 + lednr * (2*stepsize)) % 1 #* (y*self.width + x)\n lednr = lednr + 1\n s_start = 0\n v_start = 1\n hsv = [h_start,s_start,v_start]\n state[x][y] = hsv\n self.set([x,y], hsv_to_neopixel_color(hsv[0], hsv[1], hsv[2]))\n\n tint = 0\n while(True): \n for x in range(self.width):\n for y in range(self.height):\n hsv = state[x][y]\n\n new_h = (hsv[0] + stepsize/60.0) % 1.0\n new_s = (hsv[1] + stepsize/20.0) % 1.0\n new_v = hsv[2] #+ stepsize/20.0) % 1.0\n\n state[x][y][0] = new_h\n state[x][y][1] = new_h\n state[x][y][2] = new_v\n\n self.set([x,y], hsv_to_neopixel_color(\n (translate(new_h, 0.0, 1.0, 0.0, 0.1) + tint) % 1.0, \n to_sine(new_s), \n new_v))\n \n tint = (tint + stepsize/20.0) % 1\n\n self.draw()\n sleep(1.0/40)", "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def make_hexplot(XX,VV,gs=100):\n coord = [[0,0,0],[0,400,-400],[-400,400,0],[-400,0,400],[0,-400,400],[400,-400,0],[400,0,-400],[0,800,-800],[-800,800,0],[-800,0,800],[0,-800,800],[800,-800,0],[800,0,-800],[800,0,0],[-400,800,-400],[-400,-800,400],[-800,0,0],[400,-800,400],[400,800,-400]]\n\n # Horizontal cartesian coords\n hcoord = [c[0] for c in coord]\n\n # Vertical cartersian coords\n vcoord = [2. * np.sin(np.radians(60)) * (c[1] - c[2]) /3. 
for c in coord]\n\n fig, ax = plt.subplots(1,figsize=(20,20))\n ax.set_aspect('equal')\n\n nhex=len(coord)\n # Add some coloured hexagons\n for i in range(0,nhex):\n xi=(XX[:,0])+hcoord[i]/1000.\n yi=(YY[:,1])+vcoord[i]/1000.\n idist=np.sqrt(xi**2+yi**2)\n iindx=(idist<0.2)*(np.fabs(XX[:,2])<0.2)\n hex = RegularPolygon((hcoord[i], vcoord[i]), numVertices=6, radius=800./3., \n orientation=np.radians(30), \n facecolor='white', alpha=1., edgecolor='k',zorder=i*2)\n ax.add_patch(hex)\n ax.hist2d(VV[:,0][iindx]*1.5+hcoord[i],VV[:,1][iindx]*2.-375.+220.+vcoord[i],range=[[hcoord[i]-220.,hcoord[i]+220],[vcoord[i]-220,vcoord[i]+220]],bins=gs,cmin=1.0e-50,rasterized=True,density=True,zorder=i*2+1)\n ax.set_xlim(-1200,1200)\n ax.set_ylim(-1200,1200)\n ax.set_xlabel(r'$x\\ (\\mathrm{pc})$',fontsize=20)\n ax.set_ylabel(r'$y\\ (\\mathrm{pc})$',fontsize=20)\n ax.tick_params(axis='both', labelsize=20)\n\n #hex2 = RegularPolygon((800,2.*np.sin(np.radians(60))*(1600) /3.), numVertices=6, radius=800./3.,orientation=np.radians(30), \n # facecolor='white', alpha=1., edgecolor='k',zorder=0)\n #ax.add_patch(hex2)\n plt.savefig('Hexgrid.pdf',bbox_inches='tight')\n #plt.show()\n plt.close()", "def draw_H(image, coords, color=(0, 255, 0)):\n image_with_H = image.copy()\n\n canvas = image_with_H[coords[0]:coords[0] + 24,\n coords[1]:coords[1] + 20]\n\n canvas[:, :3] = color\n canvas[:, -3:] = color\n canvas[11:14] = color\n\n return image_with_H", "def rebin2 (h, name, gx=1, gy=1):\n old_nx = h.GetNbinsX()\n old_ny = h.GetNbinsY()\n new_nx = old_nx//gx\n new_ny = old_ny//gy\n hnew = ROOT.TH2F (name,\n h.GetTitle(),\n new_nx,\n h.GetXaxis().GetXmin(),\n h.GetXaxis().GetXmax(),\n new_ny,\n h.GetYaxis().GetXmin(),\n h.GetYaxis().GetXmax())\n for ix in range(0, new_nx):\n for iy in range(0, new_ny):\n sum = 0\n for iix in range(0, gx):\n for iiy in range(0, gy):\n sum += h.GetBinContent(ix*gx+iix+1, iy*gy+iiy+1)\n hnew.SetBinContent(ix+1, iy+1, sum)\n return hnew", "def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(50)", "def horizontal_arcs_iglu():\n arc(screen, BLACK, (50, 560, 300, 20), 3.14, 0)\n arc(screen, BLACK, (60, 510, 280, 20), 3.14, 0)\n arc(screen, BLACK, (80, 460, 240, 20), 3.14, 0)\n arc(screen, BLACK, (120, 420, 160, 20), 3.14, 0)", "def hadamard2(n):\n # set up plot stuff\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.set_xticks(range(n + 1)) # set axis ranges\n ax.set_yticks(range(n + 1))\n plt.xticks([]) # remove axis labels\n plt.yticks([])\n ax.set_aspect(aspect=1) # ensure it's a square and not a rectangle\n # invert y axis so the origin is the top left\n ax.set_ylim(ax.get_ylim()[::-1])\n\n def recurse(tlx, tly, brx, bry, flag):\n \"\"\" Given coords for the top left and bottom right of a square, recursively pass a boolean flag\n to see if we should draw it\n \"\"\"\n if(tlx + 1 == brx): # single square (width == 1)\n if flag: # draw black square\n ax.add_patch(Rectangle((tly, brx - 1), 1, 1, color='black'))\n return # no need to recurse anymore\n # here's the recursive part:\n # we go in the order of top left, top right, bottom left, bottom right\n # we negate the flag in the bottom right, and we keep the same flag for the rest\n recurse(tlx, tly, (tlx + brx) // 2, (tly + bry) // 2, flag)\n recurse((tlx + brx) // 2, tly, brx, (tly + bry) // 2, 
flag)\n recurse(tlx, (tly + bry) // 2, (tlx + brx) // 2, bry, flag)\n recurse((tlx + brx) // 2, (tly + bry) // 2, brx,\n bry, not flag) # invert bottom right\n\n recurse(0, 0, n, n, True) # initial case, pass corners of entire matrix\n plt.show()", "def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)", "def HOG(img, x, y):\n #TODO: write a HOG descriptor here\n des=[]\n row=0\n sub_image = img[x-8:x+8,y-8:y+8]\n while row < len(sub_image):\n col=0\n while col < len(sub_image[0]):\n temp_vector = [0 for i in range(8)]\n new_subimage = sub_image[row:row+4,col:col+4]\n x_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=1,dy=0)\n y_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=0,dy=1)\n theta = np.empty([x_gradient.shape[0],x_gradient.shape[1]])\n for i in range(len(x_gradient)):\n for j in range(len(x_gradient[0])):\n if x_gradient[i,j] == 0:\n theta[i,j] = 90\n else:\n theta[i,j] = np.arctan(y_gradient[i,j]/x_gradient[i,j])*(180/np.pi)\n theta_iter = theta.flatten() #To avoid nested for loops for 4x4 theta\n for i in range(len(theta_iter)):\n if theta_iter[i] < 45:\n temp_vector[0]=temp_vector[0]+1\n elif theta_iter[i] >= 45 and theta_iter[i] < 90:\n temp_vector[1]=temp_vector[1]+1\n elif theta_iter[i] >= 90 and theta_iter[i] < 135:\n temp_vector[2]=temp_vector[2]+1\n elif theta_iter[i] >= 135 and theta_iter[i] < 180:\n temp_vector[3]=temp_vector[3]+1\n elif theta_iter[i] >= 180 and theta_iter[i] < 225:\n temp_vector[4]=temp_vector[4]+1\n elif theta_iter[i] >= 225 and theta_iter[i] < 270:\n temp_vector[5]=temp_vector[5]+1\n elif theta_iter[i] >= 270 and theta_iter[i] < 315:\n temp_vector[6]=temp_vector[6]+1\n elif theta_iter[i] >= 315 and theta_iter[i] < 360:\n temp_vector[7]=temp_vector[7]+1\n des.extend(temp_vector)\n col=col+4\n row=row+4\n return des", "def draw_zenith(self, observatory):\n defaults = dict(color='green',alpha=0.75,lw=1.5)\n for k,v in defaults.items():\n kwargs.setdefault(k,v)\n\n # RA and Dec of zenith\n ra_zenith, dec_zenith = np.degrees(observatory.radec_of(0, '90'))\n xy = self.proj(ra_zenith, dec_zenith)\n \n self.plot(*xy,marker='+',ms=10,mew=1.5, **kwargs)\n self.tissot(ra_zenith, dec_zenith, DECAM, 100, fc='none',**kwargs)", "def show(self):\n data = []\n for row in self.grid:\n mid, bottom = [], []\n for node in row:\n \tmid += [0, int(node.right)]\n \tbottom += [int(node.down), 1]\n data += mid + [0] + bottom + [0] \n data[self.width*2+1] = 1\n data[-1] = 1\n data += (self.width*2) * [0]\n im = Image.new('1', (self.width*2+1, self.height*2+1))\n im.putdata(data)\n im.save('maze.png')\n im.show()", "def create_border_image(n):\n img=create_zeroed_image(n)\n for i in range(n):\n for j in range(n):\n img[i][0]=1\n img[0][j]=1\n img[i][n-1]=1\n img[n-1][j]=1\n return img", "def refractive_index(self):\n wd = np.arange(80,820,10)\n nd = self.boundary.imat.refractive_index(wd) \n\n plt.plot(wd, nd)\n\n return wd, nd", "def drawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, length):\n pass", "def get_patch(i,j,im,h=H): #X\n print(i,j)\n return im[(i-h):(i+h+1),(j-h):(j+h+1)]", "def build_cylinder(self, n_phis):\n index = glGenLists(1)\n phis = [float(i)*2.0*numpy.pi/float(n_phis) for i in range(n_phis+1)]\n phi_pairs = zip(phis, phis[1:])\n glNewList(index, GL_COMPILE)\n glBegin(GL_QUADS)\n for phi1,phi2 in phi_pairs:\n dot1 = min(max(numpy.cos(phi1), 0.0), 1.0)\n dot2 = min(max(numpy.cos(phi2), 0.0), 1.0)\n glTexCoord1f(dot1)\n glVertex3f(-0.5, numpy.sin(phi1), numpy.cos(phi1))\n glTexCoord1f(dot1)\n glVertex3f(0.5, 
numpy.sin(phi1), numpy.cos(phi1))\n glTexCoord1f(dot2)\n glVertex3f(0.5, numpy.sin(phi2), numpy.cos(phi2))\n glTexCoord1f(dot2)\n glVertex3f(-0.5, numpy.sin(phi2), numpy.cos(phi2))\n glEnd()\n glEndList()\n return index" ]
[ "0.6799747", "0.60946214", "0.58650184", "0.57785463", "0.577379", "0.56593394", "0.56519073", "0.56368244", "0.5629976", "0.5611365", "0.5590604", "0.55857366", "0.55807704", "0.55708385", "0.55639386", "0.5496588", "0.5472634", "0.5464442", "0.5405881", "0.53580445", "0.5355426", "0.535197", "0.53433514", "0.53404045", "0.53319496", "0.5326814", "0.53038806", "0.5300426", "0.5298979", "0.52975595" ]
0.7903363
0
Find the leftmost, rightmost, uppermost, and bottommost points of a quadrilateral and compute the maximum length between adjacent points, treating the quadrilateral as a rhombus
def get_max_length_diff_in_quad(points):
    leftmost, uppermost, rightmost, bottommost = (points[0, 0] for i in range(4))
    for point in points:
        x = point[0, 0]
        y = point[0, 1]
        if x < leftmost[0]:
            # Point is located on the left side of leftmost point
            leftmost = point[0]
        elif x > rightmost[0]:
            rightmost = point[0]
        elif y < uppermost[1]:
            uppermost = point[0]
        elif y > bottommost[1]:
            bottommost = point[0]
    length_diff = [cv2.norm(uppermost - leftmost),
                   cv2.norm(rightmost - uppermost),
                   cv2.norm(bottommost - rightmost),
                   cv2.norm(leftmost - bottommost)]
    return np.max(length_diff)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_quadrature_points(self) -> int:", "def getIndividualTopLengths(self):\n nquad = self.getNumQuads()\n lengths = np.zeros(nquad)\n for i in range(nquad):\n P0, P1, P2, P3 = self._quadrilaterals[i]\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n lengths[i] = (p1 - p0).mag() / 1000.0\n return lengths", "def find_height_wo(self): \n if not (self.left or self.right):\n return 0\n if not self.left:\n return 1 + max(0, self.right.find_height_wo())\n if not self.right:\n return 1 + max(self.left.find_height_wo(), 0)\n return 1 + max(self.left.find_height_wo(), self.right.find_height_wo())", "def getArea(rob):\r\n def dfs(visit, i, j):\r\n visit.add((i, j))\r\n for k in range(4):\r\n newi, newj = i + x[k], j + y[k]\r\n if (newi, newj) in visit or not rob.move(k):\r\n continue\r\n dfs(visit, newi, newj)\r\n rob.move((k + 2) % 4)\r\n visit = set()\r\n dfs(visit, 0, 0)\r\n return len(visit)", "def max_dim(elements, coordinates):\n atom_vdw_vertical = np.matrix(\n [[atomic_vdw_radius[i.upper()]] for i in elements])\n atom_vdw_horizontal = np.matrix(\n [atomic_vdw_radius[i.upper()] for i in elements])\n dist_matrix = euclidean_distances(coordinates, coordinates)\n vdw_matrix = atom_vdw_vertical + atom_vdw_horizontal\n re_dist_matrix = dist_matrix + vdw_matrix\n final_matrix = np.triu(re_dist_matrix)\n i1, i2 = np.unravel_index(final_matrix.argmax(), final_matrix.shape)\n maxdim = final_matrix[i1, i2]\n return i1, i2, maxdim", "def Extrema(self):\n ymin = np.min(self._corners[:, 1])\n xmin = np.min(self._corners[:, 0])\n ymax = np.max(self._corners[:, 1])\n xmax = np.max(self._corners[:, 0])\n return ymin, xmin, ymax, xmax", "def quadrant(pAx, pAy, pBx, pBy):\n###############################################################################\n\n if (pBx>pAx and pBy>pAy):\n return 1\n elif (pBx<pAx and pBy>pAy):\n return 2\n elif (pBx<pAx and pBy<pAy):\n return 3\n elif (pBx>pAx and pBy<pAy):\n return 4\n else:\n return 0", "def submax(left, middle, right):\n L = middle - left # L and R are both positive if middle is the\n R = middle - right # observed max of the integer samples\n return 0.5 * (R - L) / (R + L)\n # Derivation: Consider a quadratic q(x) := P(0) - P(x). Then q(x) has\n # two roots, one at 0 and one at z, and the extreme is at (0+z)/2\n # (i.e. 
at z/2)\n # q(x) = bx*(x-z) # a may be positive or negative\n # q(1) = b*(1 - z) = R\n # q(-1) = b*(1 + z) = L\n # (1+z)/(1-z) = L/R (from here it's just algebra to find a)\n # z + 1 = R/L - (R/L)*z\n # z*(1+R/L) = R/L - 1\n # z = (R/L - 1)/(R/L + 1) = (R-L)/(R+L)", "def maxArea(height):\n num_pt = len(height)\n max_area = 0\n for i in range(num_pt):\n for j in range(i + 1, num_pt):\n h = min(height[i], height[j])\n a = h * (j - i)\n if a > max_area:\n max_area = a\n return max_area", "def find_B(self):\n max_lb = 0\n for arc in self.arcs():\n lb = self.arc_info[arc[0]]['lower_bound']\n max_lb = max(max_lb, lb)\n n = len(self)\n m = len(list(self.edges()))\n return((m - n + 2)*max_lb)", "def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total", "def get_bounds_halo(self):\n bottom_right = np.asarray([self.coords_halo[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords_halo[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def maxAreaOfIsland(self, grid):\n \n def helper(x, y):\n if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]) or grid[x][y] == 'X':\n return 0\n if grid[x][y] == 1:\n 
grid[x][y] = 'X'\n return 1 + helper(x - 1, y) + helper(x + 1, y) + helper(x, y + 1) + helper(x, y - 1)\n else:\n grid[x][y] = 'X'\n return 0\n \n max_area = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n max_area = max(max_area, helper(i, j)) \n return max_area\n\n \"\"\"\n - depth-first search, recursive, mark visited in a set\n - O(n), O(n)\n \"\"\"\n \n visited = set() # can use global variable instead of passing into stack\n \n def helper(x, y):\n if x < 0 or x >= len(grid) or y < 0 or y >= len(grid[0]) or (x, y) in visited:\n return 0\n visited.add((x, y))\n if grid[x][y] == 1:\n return 1 + helper(x - 1, y) + helper(x + 1, y) + helper(x, y + 1) + helper(x, y - 1)\n else:\n return 0\n \n max_area = 0\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n max_area = max(max_area, helper(i, j)) \n return max_area\n\n \"\"\"\n - depth-first search, iterative, mark visited in a set\n - O(n), O(n)\n \"\"\"\n max_area = 0\n visited = set()\n row, col = len(grid), len(grid[0])\n for i in range(row):\n for j in range(col):\n area = 0\n n = grid[i][j]\n stack = [(i, j)] # use stack to track all neighbors (all need to be searched) \n while stack:\n x, y = stack.pop()\n if 0 <= x < row and 0 <= y < col and (x, y) not in visited:\n visited.add((x, y))\n if grid[x][y] == 1:\n area += 1\n stack += [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]\n max_area = max(max_area, area)\n return max_area", "def max_area(props):\n i_max=-1\n max_area = -1\n for i, prop in enumerate(props):\n bbx = np.array(prop.bbox)\n #gets rid of any region touching the sides - fufils \"exlude on side\" function of imagej Analyze particles\n #2/2/2018 - now only excludes corner points so that onhs that touch the sides aren't excluded - this gets rid of non-retina\n if (bbx[0]==0 or bbx[2]==256) and (bbx[1]==0 or bbx[3]==1024):\n continue\n #if np.any(bbx==0) or np.any(bbx==256):\n # continue\n else:\n #find max area\n if prop.area > max_area:\n max_area = prop.area\n i_max = i\n return i_max", "def smallest_r(points, pval):\n\n N = points.shape[0]\n n = points.shape[1]\n\n meshed = [np.meshgrid(points[i, :], points[i, :]) for i in range(N)]\n diffs = np.array([cols-rows for rows, cols in meshed])\n box_cube_condition = (diffs > 0).all(axis=0)\n distsp = (diffs**pval).sum(axis=0)\n\n nolimit_connections = (distsp**(1/pval))*box_cube_condition\n nolimit_connections = np.nan_to_num(nolimit_connections)\n \n maxes, prev = {}, {}\n\n for i in range(n):\n maxes[i] = np.inf\n maxes[n-2] = 0\n \n for u in topo_sort(nolimit_connections): #can replace the top sort with points sorting\n for v in np.nonzero(nolimit_connections[:, u])[0]:\n\n alt = max(maxes[u], nolimit_connections[v, u])\n \n if alt < maxes[v]:\n\n maxes[v] = alt\n prev[v] = u\n\n u = n-1\n\n path=[]\n path.append(u)\n \n if prev.get(u) is not None:\n while u != n-2:\n u = prev[u]\n path.append(u)\n\n return path, maxes[n-1]\n\n else:\n return [], 0.0", "def height(t):\n if t.is_empty:\n return 0\n else:\n left = height(t.left)\n right = height(t.right)\n \n return 1 + max([left, right])", "def footprint_corner_indices():", "def getNumHexRings(self):\n maxRing = 0\n for a in self.getAssemblies():\n ring, _pos = self.spatialGrid.getRingPos(a.spatialLocator)\n maxRing = max(maxRing, ring)\n return maxRing", "def largest_polygon(polygons):\n # we should probably use a complicated formula to do this\n # but for now, it probably suffices to notice that the last one is usually\n # the largest\n return polygons.points[-1]", "def 
twoMaxs(lnp):\n\tindex1 = 0\n\tindex2 = 0\n\tcnt = 0\n\tmaxArea = 0\n\tmaxArea2 = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(ew * eh >= maxArea):\n\t\t\tindex1 = cnt\n\t\t\tmaxArea = ew * eh\n\t\tcnt += 1\n\t\n\n\tcnt = 0\n\tfor (ex, ey, ew, eh) in lnp:\n\t\tif(index1 == cnt):\n\t\t\tcnt += 1\n\t\t\tcontinue\n\t\tif(ew * eh >= maxArea2):\n\t\t\tindex2 = cnt\n\t\t\tmaxArea2 = ew * eh\n\t\tcnt +=1\n\t\n\treturn (index1, index2)", "def compute_D_hmin_hmax_(mesh0: trimesh.Trimesh):\n mesh = copy.deepcopy(mesh0)\n mesh = center_align_mesh(mesh)\n\n x = mesh.vertices[:,0]\n y = mesh.vertices[:,1]\n z = mesh.vertices[:,2]\n\n D = np.mean(np.ptp(mesh.vertices[:,:2], axis=0))\n\n # assume the cell is radially symmetric; we measure the mean height along r in bins\n # and then take the minimum and maximum\n r = np.sqrt(x**2 + y**2)\n\n nv = len(x)\n bin_edges = np.linspace(0, D/2, nv//100, endpoint=True)\n bin_r = (bin_edges[:-1] + bin_edges[1:]) / 2\n bin_h = []\n\n for r0, r1 in zip(bin_edges[:-1], bin_edges[1:]):\n idx = np.argwhere(np.logical_and(r >= r0, r < r1))\n if len(idx):\n h = np.ptp(z[idx])\n else:\n h = 0\n bin_h.append(h)\n\n bin_h = np.array(bin_h)\n\n hmax = np.max(bin_h)\n # hmin is the minimum height of the cell in the inside region,\n # i.e. we must filter out the particles that have a larger radial coordinate\n # than those at hmax.\n idmax = np.argmax(np.abs(bin_h))\n rhmax = bin_r[idmax]\n idx = np.argwhere(bin_r <= rhmax)\n hmin = np.min(bin_h[idx])\n\n return D, hmin, hmax", "def len_square(bound):\n\treturn (8 - 2 * bound)", "def find_max_coords(self):\n all_max_bound = []\n all_min_bound = []\n shape_dict = self.shape_dict\n for zone_id in shape_dict:\n zone_shape = shape_dict[zone_id]\n max_bound_zone = zone_shape.max_bound\n min_bound_zone = zone_shape.min_bound\n all_max_bound.append(max_bound_zone)\n all_min_bound.append(min_bound_zone)\n\n map_max_bound, unused_max = Utils.calculate_boundaries(all_max_bound)\n unused_min, map_min_bound = Utils.calculate_boundaries(all_min_bound)\n\n return (map_max_bound, map_min_bound)", "def bottomRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMaxY)", "def maxArea(self, height: List[int]) -> int:\n \n left = 0\n right = len(height)-1\n max_area=0\n \n while left < right:\n area = (right-left)*(min(height[left], height[right]))\n if area > max_area:\n max_area=area\n \n if height[left] <= height[right]:\n left+=1\n else:\n right-=1\n \n return max_area", "def trapezoid_area(base_minor, base_major, height):\n return ((base_major + base_minor) / 2 ) * height", "def find_delaunay_with_max_vertices(bbox, nvertex):\n # find bracketing values\n a1 = a2 = 1e6\n t1 = calculate_mesh(a1, bbox, nvertex)\n afac = np.power(10., -np.sign(t1))\n while (\n np.sign(t1) ==\n np.sign(calculate_mesh(a2, bbox, nvertex))\n ):\n a2 *= afac\n val_at_root = -1\n nvtweak = nvertex\n while val_at_root < 0:\n a = scipy.optimize.brentq(\n calculate_mesh,\n a1,\n a2,\n args=(bbox, nvtweak, ))\n val_at_root = calculate_mesh(a, bbox, nvertex)\n a1 = a * 2\n a2 = a * 0.5\n nvtweak -= 1\n mesh = calculate_mesh(a, bbox, None, get_t=True)\n return mesh, a", "def get_height_iterative(self):\n max_so_far = 0\n nodes_queue = deque()\n nodes_queue.append((self.root, 0))\n while nodes_queue:\n node, depth = nodes_queue.popleft()\n max_so_far = max(max_so_far, depth)\n if node.left:\n nodes_queue.append((node.left, depth + 1))\n if node.right:\n nodes_queue.append((node.right, depth + 1))\n return max_so_far", "def test_rhombus_area(self):\n 
self.assertEqual(17.5, rhombus_area(\n self.values['diagonal_1'], self.values['diagonal_2']))" ]
[ "0.610899", "0.59568053", "0.5939055", "0.5891226", "0.5877208", "0.58417696", "0.5826405", "0.58173084", "0.5798544", "0.57555825", "0.57455087", "0.57285994", "0.57191706", "0.5711928", "0.5706906", "0.5703597", "0.56971043", "0.56840116", "0.56803584", "0.56797194", "0.5674422", "0.56648266", "0.5659667", "0.5657215", "0.5615989", "0.5601487", "0.55739456", "0.55720687", "0.556514", "0.5538234" ]
0.70866054
0
Utility method to build the PayPal Pay request for starting the transaction process. The response will contain a payKey that will be used when we redirect the user to PayPal to complete the transaction.
def create_pay_request(donation_amount, charities):
    # Try to split the donation amount equally among all charities
    l = len(charities)
    split_amount = round(donation_amount/l, 2)

    # Test that the amount was equally split. If it's not we must
    # adjust one of the split amounts to make the sum of the donation
    # be what the user wants
    n = split_amount * l

    # Equal split
    if n == donation_amount:
        fraction = 0.00
    # Sum is greater than the user's donation amount so adjust the
    # amount by -0.01
    elif n > donation_amount:
        fraction = -0.01
    # Sum is less than the user's donation amount so adjust the
    # amount by +0.01
    else:
        fraction = 0.01

    # Build the list of receivers. Each receiver will be the charity
    # and the amount will be the split amount. The first charity will
    # get the fractional amount added or subtracted.
    receiver_list = []
    sum = 0
    for i in range(len(charities)):
        r = {
            "email": charities[i].email,
        }

        # Add the fraction to the first charity's amount
        amount = split_amount
        if i == 0:
            amount += fraction
        sum += amount

        # convert the amount to a string with precision of 2 and add
        # the receiver to the list
        r['amount'] = "%0.2f" % amount
        receiver_list.append(r)

    # Build the JSON object for the payment request. All of this is pretty
    # standard stuff and can be found in the Adaptive Payments documentation
    json = {
        "returnUrl": settings.RETURN_URL,
        "cancelUrl": settings.CANCEL_URL,
        "receiverList": {
            "receiver": receiver_list
        },
        "currencyCode": "USD",
        "actionType": "PAY",
        "reverseAllParallelPaymentsOnError": True,
        "requestEnvelope": {"errorLanguage": "en_US"}
    }

    # Use Google's fetch method to send the pay request to PayPal. The response
    # will contain the payKey.
    response = fetch(settings.API_ENDPOINT+"/Pay",
                     payload=simplejson.dumps(json),
                     method="POST",
                     headers=get_paypal_headers())
    return simplejson.loads(response.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_complete_request_body():\n return \\\n {\n \"intent\": \"sale\",\n \"payer\": {\n \"payment_method\": \"paypal\"},\n \"redirect_urls\": {\n \"return_url\": \"http://localhost:3000/payment/execute\",\n \"cancel_url\": \"http://localhost:3000/\"},\n \"transactions\": [{\n \"item_list\": {\n \"items\": [{\n \"name\": \"item\",\n \"sku\": \"item\",\n \"price\": \"5.00\",\n \"currency\": \"USD\",\n \"quantity\": 1}]},\n \"amount\": {\n \"total\": \"5.00\",\n \"currency\": \"USD\"},\n \"description\": \"This is the payment transaction description.\"}]\n }", "def to_create_payement_request(self):\n if not isinstance(self.reference, str):\n raise ValueError(\n 'reference should be string. This field is required')\n\n result = {\n 'amount': {\n # docs:https://docs.adyen.com/development-resources/currency-codes\n 'value': self.amount * 100,\n 'currency': self.currency\n },\n 'reference': self.reference,\n 'countryCode': self.country_code,\n }\n\n if self.shopper_reference and isinstance(self.shopper_reference, str):\n result['shopperReference'] = self.shopper_reference\n result['recurringProcessingModel'] = 'CardOnFile'\n result['storePaymentMethod'] = True\n\n return result", "def paypal_gateway(self):\n\n print(request.form)\n\n # Gather information from callback response\n first_name = request.form.get(\"first_name\", None)\n last_name = request.form.get(\"last_name\", None)\n payer_id = request.form.get(\"payer_id\", None)\n payer_email = request.form.get(\"payer_email\", None)\n item_name = request.form.get(\"item_name\", None)\n item_number = request.form.get(\"item_number\", None)\n custom = request.form.get(\"custom\", None)\n payment_gross = request.form.get(\"payment_gross\", None)\n\n ## Generate Token and store in database\n gen_uuid = str(uuid.uuid4())\n\n try:\n t = Token()\n t.uuid = gen_uuid\n t.email = payer_email\n t.active = True\n t.package = item_name\n t.package_id = item_number\n\n db.session.add(t)\n db.session.commit()\n except:\n import traceback\n db.session.rollback()\n traceback.print_exc()\n\n ## Send email to user with unique link\n try:\n msg = Message(\n \"Guildbit - Order Confirmation\",\n sender=settings.DEFAULT_MAIL_SENDER,\n recipients=[payer_email])\n\n msg.html = render_template(\"emails/payment_thankyou.html\", package=item_name, uuid=gen_uuid)\n mail.send(msg)\n except:\n import traceback\n traceback.print_exc()\n\n return jsonify({\n \"status\": \"received\"\n })", "def build_jspay_params(paysign_key, appid, prepay_id):\n _params = {\n \"nonceStr\": sign.random_nonce_str(32),\n \"timeStamp\": int(time.time()),\n \"package\": 'prepay_id={}'.format(prepay_id),\n \"signType\": \"MD5\",\n \"appId\": appid\n }\n _params['paySign'] = sign.sign_for_pay(paysign_key, **_params)\n return _params", "def make_payment():\n\n response = VoiceResponse()\n if 'caller_name' not in session:\n session['caller_name'] = request.args.get(\n 'caller_name') or \"Twilio Payment\"\n if 'payment_amount' not in session:\n session['payment_amount'] = request.args.get('amount') or \"5000\"\n if 'card_number' not in session:\n response.redirect('/get_card_number')\n elif 'expiry' not in session:\n response.redirect('/get_expiry')\n elif 'cvv' not in session:\n response.redirect('/get_cvv')\n else:\n call_sid = request.form.get('CallSid')\n session['call_sid'] = call_sid\n response.redirect('/process_payment')\n\n return str(response)", "def payReturn(request, *args, **kwargs):\n initParam = {}\n pay_key = request.session.get('pay_key', None)\n gateway = 
request.session.get('gateway', None)\n if pay_key and gateway:\n del request.session['pay_key']\n del request.session['gateway']\n #Check and get Transaction information\n checkMethod = kwargs.pop('checkMethod', None)\n if checkMethod:\n initParam['pay_key'] = pay_key\n initParam['gateway'] = gateway\n transaction = checkMethod(request, initParam=initParam)\n if transaction:\n p = driver.PayPal()\n #Check whether use has paid successfully.\n result = p.check_ap_payment_status(transaction.pay_key)\n if result['status'][0] == 'COMPLETED':\n #Do something after user payed successfully.\n executeMethod = kwargs.pop('executeMethod', None)\n if executeMethod:\n initParam['transaction_id'] = transaction.id\n initParam['buyer_account'] = result['senderEmail'][0]\n if executeMethod(initParam=initParam):\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if back_page:\n del request.session['back_page']\n if success_page:\n del request.session['success_page']\n initParam['success_page'] = success_page\n initParam['success_page_msg'] = request.session['success_page_msg']\n #For the value in paypal_success.html\n initParam['app'] = transaction.app\n initParam['price'] = transaction.price\n initParam['type'] = 'Transaction'\n initParam['msg'] = _('You have successfully paid the money. We have already sent an email to the app seller. In the meanwhile you can send private message to seller as well.')\n log.info(_('User %(param1)s has paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n return render_to_response(\"payment/paypal_success.html\", initParam, context_instance=RequestContext(request))\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but execute method %(param3)s failed.')\n % {'param1': request.user.username, 'param2': transaction.id, 'param3': executeMethod.__name__})\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but ExecuteMethod does not exist.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('User %(param1)s has no paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, User: %(param3)s, Execute method %(param4)s failed.')\n % {'param1': pay_key, 'param2': gateway, 'param3': request.user.username, 'param4': checkMethod.__name__})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, CheckMethod does not exist.')\n % {'param1': pay_key, 'param2': gateway})\n else:\n log.error(_('Pay. 
PayKey or Gateway no exists.'))\n\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please transaction again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))", "def __create_initiate_transaction_request(payment_details):\n head = CommonUtil.get_secure_request_header(MerchantProperty.get_client_id(), payment_details.get_channel_id())\n body = payment_details._PaymentDetail__create_initiate_transaction_request_body()\n return InitiateTransactionRequest().set_head(head).set_body(body)", "def post(self, payment_id=None):\n data = request.get_json()\n redirect_url = data.get('redirect_url')\n cart_token = data.get('cart_token')\n address_id = data.get('address_id')\n \n cart = Cart.query.filter_by(token=cart_token, user_id=current_user.id).first()\n if not cart:\n return {\"message\":\"No cart with this id\"}, 404\n\n if not address_id:\n return {\"message\": \"Please enter a address for your order\"}, 404\n\n order = Order.create_from_cart(cart_token, address_id)\n payment = Payment.query.filter_by(order_id=order.id).first()\n if not payment:\n payment = Payment(\n user_id=current_user.id, \n order_id=order.id, \n amount=order.total,\n status='Pending'\n )\n\n db.session.add(payment)\n db.session.commit()\n\n client = Client(current_app.config['ZARINPAL_WEBSERVICE'])\n mail = current_user._email\n\n if not mail:\n return {\"message\": \"Please enter your email address to continue the payment\"}\n\n user_info = UserAddress.query.filter_by(id=address_id).first()\n if user_info.phone:\n mobile = user_info.phone\n else:\n mobile = '' \n\n result = client.service.PaymentRequest(current_app.config['MERCHANT_ID'],\n payment.amount,\n 'nani',\n mail,\n mobile,\n redirect_url)\n\n payment.authority = result.Authority\n db.session.commit()\n if result.Status == 100:\n return {'payment_url':'https://www.zarinpal.com/pg/StartPay/' + result.Authority}\n else:\n return {\n 'message':\"We can't connect you to zarin pal server, right now. 
Please try again in a few moments.\"\n }, 404", "def post(self):\n \n access_token = accessToken.gerated_access_token\n api_url = \"https://sandbox.safaricom.co.ke/mpesa/stkpush/v1/processrequest\"\n headers = { \"Authorization\": \"Bearer %s\" % access_token }\n request = {\n \"BusinessShortCode\": constants.BusinessShortCode ,\n \"Password\": generated_password,\n \"Timestamp\": generated_timestamp,\n \"TransactionType\": \"CustomerPayBillOnline\",\n \"Amount\": \"1\",\n \"PartyA\": \"254705275702\",\n \"PartyB\": constants.BusinessShortCode,\n \"PhoneNumber\": \"\", #pass in the phone number that will be prompted to enter the pin\n \"CallBackURL\": \"https://test.com\", #pass in an actual callback url if you have one\n \"AccountReference\": \"Test100\",\n \"TransactionDesc\": \"Test payment\"\n }\n \n response = requests.post(api_url, json = request, headers=headers)\n # print (response.text)\n\n return {\"response\":response.json()}", "def pay():\n data = request.get_json()\n print(data)\n intent = stripe.PaymentIntent.create(amount=data['amnt'], currency='usd', metadata={'integration_check': 'accept_a_payment'})\n return jsonify(client_secret=intent.client_secret)", "def create_payment_data(self, **kwargs):\n\n order_id = kwargs.get('order_id')\n timestamp = datetime.now().strftime('%Y%m%d%H%M%S')\n\n currency = kwargs.get('currency', 'RON')\n amount = kwargs.get('amount')\n customer_id = kwargs.get('customer_id')\n\n # the description of the payment\n details = kwargs.get('details')\n\n billing = kwargs.get('billing', {})\n\n params = kwargs.get('params', {})\n\n # urls\n confirm_url = kwargs.get('confirm_url')\n return_url = kwargs.get('return_url')\n\n if not order_id or not amount or not customer_id or not details or not confirm_url or not return_url:\n if self.developement:\n debug(\"Arguments for create_payment_data: %s\", kwargs)\n\n raise Exception(\"Can't create mobilpay request with missing args.\")\n\n order_id = str(order_id)\n if len(order_id) > 64:\n raise Exception('order_id should not have more than 64 characters.')\n\n args = {\n # order tag\n \"order_id\": order_id,\n \"order_type\": \"card\",\n \"timestamp\": timestamp,\n\n # invoice tag\n \"amount\": amount, \n \"currency\": currency,\n \"customer_id\": customer_id,\n\n \"details\": details,\n\n # other params\n \"params\": params,\n\n # urls\n \"confirm_url\": confirm_url,\n \"return_url\": return_url\n }\n\n if billing:\n args['billing'] = {\n \"first_name\": billing.get('first_name', ''),\n \"last_name\": billing.get('last_name', ''),\n \"address\": billing.get('address', ''),\n \"phone\": billing.get('phone', ''),\n \"email\": billing.get('email', '')\n }\n\n # create the xml\n xml_message = self.create_request_xml(**args)\n\n if self.developement:\n debug(xml_message)\n\n return self.encrypt_message(xml_message)", "def payPalReturn(request, *args, **kwargs):\n initParam = {}\n token = request.GET.get('token')\n payerID = request.GET.get('PayerID')\n initParam['token'] = token\n initParam['payerid'] = payerID\n if token and payerID:\n p = driver.PayPal()\n EC_RETURNURL = '/'.join([common.getHttpHeader(request), 'payment/paypal_return'])\n EC_CANCELURL = '/'.join([common.getHttpHeader(request), 'payment/paypal_cancel'])\n res_dict = p.GetExpressCheckoutDetailsInfo(EC_RETURNURL, EC_CANCELURL, token)\n state = p._get_value_from_qs(res_dict, 'ACK')\n if state in [\"Success\", \"SuccessWithWarning\"]:\n #Show the list of service detail to user.\n executeMethod = kwargs.pop('executeMethod', None)\n if 
executeMethod:\n gateway = request.session.get('gateway', None)\n if gateway:\n initParam['gateway'] = gateway\n serviceDetail, serviceItems, discount_rate = executeMethod(request, initParam=initParam)\n if serviceDetail and serviceItems:\n initParam['serviceDetail'] = serviceDetail\n initParam['serviceItems'] = serviceItems\n initParam['discount_rate'] = discount_rate\n return render_to_response('payment/paypal_return.html', initParam, context_instance=RequestContext(request))\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, Execute method %(param3)s failed.')\n % {'param1': token, 'param2': payerID, 'param3': executeMethod.__name__})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s. Gateway no exists in request.session.')\n % {'param1': token, 'param2': payerID})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, ExecuteMethod does not exist.')\n % {'param1': token, 'param2': payerID})\n else:\n error = p._get_value_from_qs(res_dict, 'L_SHORTMESSAGE0')\n log.error(_('Token %(param1)s, PayerID: %(param2)s, %(param3)s.')\n % {'param1': token, 'param2': payerID, 'param3': error})\n else:\n log.error(_('Token or PayerID no exists.'))\n\n if request.session.get('gateway', None):\n del request.session['gateway']\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please payment again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))", "def make_url(self, returnURL, paymentReason, pipelineName,\r\n transactionAmount, **params):\r\n # use the sandbox authorization endpoint if we're using the\r\n # sandbox for API calls.\r\n endpoint_host = 'authorize.payments.amazon.com'\r\n if 'sandbox' in self.host:\r\n endpoint_host = 'authorize.payments-sandbox.amazon.com'\r\n base = \"/cobranded-ui/actions/start\"\r\n\r\n params['callerKey'] = str(self.aws_access_key_id)\r\n params['returnURL'] = str(returnURL)\r\n params['paymentReason'] = str(paymentReason)\r\n params['pipelineName'] = pipelineName\r\n params['transactionAmount'] = transactionAmount\r\n params[\"signatureMethod\"] = 'HmacSHA256'\r\n params[\"signatureVersion\"] = '2'\r\n \r\n if(not params.has_key('callerReference')):\r\n params['callerReference'] = str(uuid.uuid4())\r\n\r\n parts = ''\r\n for k in sorted(params.keys()):\r\n parts += \"&%s=%s\" % (k, urllib.quote(params[k], '~'))\r\n\r\n canonical = '\\n'.join(['GET',\r\n str(endpoint_host).lower(),\r\n base,\r\n parts[1:]])\r\n\r\n signature = self._auth_handler.sign_string(canonical)\r\n params[\"signature\"] = signature\r\n\r\n urlsuffix = ''\r\n for k in sorted(params.keys()):\r\n urlsuffix += \"&%s=%s\" % (k, urllib.quote(params[k], '~'))\r\n urlsuffix = urlsuffix[1:] # strip the first &\r\n \r\n fmt = \"https://%(endpoint_host)s%(base)s?%(urlsuffix)s\"\r\n final = fmt % vars()\r\n return final", "def response_post_params(cls, post_params):\r\n resp_params = {\r\n # Indicate whether the payment was successful\r\n \"decision\": \"ACCEPT\" if 
cls.PAYMENT_STATUS_RESPONSE == \"success\" else \"REJECT\",\r\n\r\n # Reflect back whatever the client sent us,\r\n # defaulting to `None` if a paramter wasn't received\r\n \"course_id\": post_params.get('course_id'),\r\n \"orderAmount\": post_params.get('amount'),\r\n \"ccAuthReply_amount\": post_params.get('amount'),\r\n \"orderPage_transactionType\": post_params.get('orderPage_transactionType'),\r\n \"orderPage_serialNumber\": post_params.get('orderPage_serialNumber'),\r\n \"orderNumber\": post_params.get('orderNumber'),\r\n \"orderCurrency\": post_params.get('currency'),\r\n \"match\": post_params.get('match'),\r\n \"merchantID\": post_params.get('merchantID'),\r\n\r\n # Send fake user data\r\n \"billTo_firstName\": \"John\",\r\n \"billTo_lastName\": \"Doe\",\r\n \"billTo_street1\": \"123 Fake Street\",\r\n \"billTo_state\": \"MA\",\r\n \"billTo_city\": \"Boston\",\r\n \"billTo_postalCode\": \"02134\",\r\n \"billTo_country\": \"us\",\r\n\r\n # Send fake data for other fields\r\n \"card_cardType\": \"001\",\r\n \"card_accountNumber\": \"############1111\",\r\n \"card_expirationMonth\": \"08\",\r\n \"card_expirationYear\": \"2019\",\r\n \"paymentOption\": \"card\",\r\n \"orderPage_environment\": \"TEST\",\r\n \"orderPage_requestToken\": \"unused\",\r\n \"reconciliationID\": \"39093601YKVO1I5D\",\r\n \"ccAuthReply_authorizationCode\": \"888888\",\r\n \"ccAuthReply_avsCodeRaw\": \"I1\",\r\n \"reasonCode\": \"100\",\r\n \"requestID\": \"3777139938170178147615\",\r\n \"ccAuthReply_reasonCode\": \"100\",\r\n \"ccAuthReply_authorizedDateTime\": \"2013-08-28T181954Z\",\r\n \"ccAuthReply_processorResponse\": \"100\",\r\n \"ccAuthReply_avsCode\": \"X\",\r\n\r\n # We don't use these signatures\r\n \"transactionSignature\": \"unused=\",\r\n \"decision_publicSignature\": \"unused=\",\r\n \"orderAmount_publicSignature\": \"unused=\",\r\n \"orderNumber_publicSignature\": \"unused=\",\r\n \"orderCurrency_publicSignature\": \"unused=\",\r\n }\r\n\r\n # Indicate which fields we are including in the signature\r\n # Order is important\r\n signed_fields = [\r\n 'billTo_lastName', 'orderAmount', 'course_id',\r\n 'billTo_street1', 'card_accountNumber', 'orderAmount_publicSignature',\r\n 'orderPage_serialNumber', 'orderCurrency', 'reconciliationID',\r\n 'decision', 'ccAuthReply_processorResponse', 'billTo_state',\r\n 'billTo_firstName', 'card_expirationYear', 'billTo_city',\r\n 'billTo_postalCode', 'orderPage_requestToken', 'ccAuthReply_amount',\r\n 'orderCurrency_publicSignature', 'orderPage_transactionType',\r\n 'ccAuthReply_authorizationCode', 'decision_publicSignature',\r\n 'match', 'ccAuthReply_avsCodeRaw', 'paymentOption',\r\n 'billTo_country', 'reasonCode', 'ccAuthReply_reasonCode',\r\n 'orderPage_environment', 'card_expirationMonth', 'merchantID',\r\n 'orderNumber_publicSignature', 'requestID', 'orderNumber',\r\n 'ccAuthReply_authorizedDateTime', 'card_cardType', 'ccAuthReply_avsCode'\r\n ]\r\n\r\n # Add the list of signed fields\r\n resp_params['signedFields'] = \",\".join(signed_fields)\r\n\r\n # Calculate the fields signature\r\n signed_fields_sig = processor_hash(resp_params['signedFields'])\r\n\r\n # Calculate the public signature\r\n hash_val = \",\".join([\r\n \"{0}={1}\".format(key, resp_params[key])\r\n for key in signed_fields\r\n ]) + \",signedFieldsPublicSignature={0}\".format(signed_fields_sig)\r\n\r\n resp_params['signedDataPublicSignature'] = processor_hash(hash_val)\r\n\r\n return resp_params", "def pay(payment_request: PaymentRequest):\n try:\n payment = 
Payment.get(payment_request.id)\n log.info('payment is already complete - not double spending', payment=payment)\n return payment\n except PaymentNotFoundError:\n pass\n\n log.info('trying to pay', payment_id=payment_request.id)\n\n # XXX retry on retry-able errors\n try:\n with get_sdk(config.STELLAR_BASE_SEED, payment_request.app_id) as blockchain:\n tx_id = blockchain.pay_to(\n payment_request.recipient_address,\n payment_request.amount,\n payment_request.id)\n enqueue_report_wallet_balance(blockchain.root_address)\n\n log.info('paid transaction', tx_id=tx_id, payment_id=payment_request.id)\n statsd.inc_count('transaction.paid',\n payment_request.amount,\n tags=['app_id:%s' % payment_request.app_id])\n except PERSISTENT_ERRORS as e:\n raise PersistentError(e)\n except Exception as e:\n statsd.increment('transaction.failed',\n tags=['app_id:%s' % payment_request.app_id])\n log.exception('failed to pay transaction', payment_id=payment_request.id)\n raise\n\n payment = Payment.from_payment_request(payment_request, blockchain.root_address, tx_id)\n payment.save()\n\n log.info('payment complete - submit back to callback payment.callback', payment=payment)\n\n return payment", "def _generate_transaction(\n payment: Payment,\n kind: str,\n amount: Decimal,\n *,\n id='',\n is_success=True,\n **data) -> Transaction:\n transaction = create_transaction(\n payment=payment,\n kind=kind,\n amount=amount,\n currency=data.pop('currency', payment.currency),\n gateway_response=data,\n token=id,\n is_success=is_success)\n return transaction", "def post(self, request, *args, **kwargs):\n try:\n params = {smart_str(k): smart_str(v) for k, v in request.data.iteritems()}\n sign = params.pop('sign')\n params.pop('sign_type')\n with open(settings.ALIPAY_APP_INFO['basic_info']['ALIPAY_RSA_PUBLIC_KEY'], 'r') as fp:\n public_key = fp.read()\n\n if verify_with_rsa(public_key, get_sign_content(params), sign):\n passback_params = request.data.get(\"passback_params\")\n out_trade_no = request.data.get(\"out_trade_no\", \"\")\n\n post_data = {\n \"trade_type\": \"alipay\",\n \"out_trade_no\": out_trade_no,\n \"trade_no\": request.data.get(\"trade_no\"),\n \"total_fee\": request.data.get(\"total_amount\"),\n \"buyer_email\": request.data.get(\"buyer_email\"),\n \"extra_common_param\": request.data.get(\"passback_params\"),\n }\n\n if out_trade_no != \"\":\n rep = requests.post(passback_params, data=post_data)\n rep_data = rep.json()\n if rep_data.get('result') == \"success\":\n return HttpResponse('success')\n except Exception, e:\n log.exception(e)\n return HttpResponse(\"fail\")", "def get_gateway_url(self, request):\n params = {\n 'id': self.get_backend_setting('id'),\n 'description': self.get_order_description(self.payment, self.payment.order),\n 'amount': self.payment.amount,\n 'currency': self.payment.currency,\n 'type': 0, # 0 = show \"return\" button after finished payment\n 'control': self.payment.pk,\n 'URL': self.get_URL(self.payment.pk),\n 'URLC': self.get_URLC(),\n 'api_version': 'dev',\n }\n\n user_data = {\n 'email': None,\n 'lang': None,\n }\n signals.user_data_query.send(sender=None, order=self.payment.order, user_data=user_data)\n\n if user_data['email']:\n params['email'] = user_data['email']\n\n if user_data['lang'] and user_data['lang'].lower() in self._ACCEPTED_LANGS:\n params['lang'] = user_data['lang'].lower()\n elif self.get_backend_setting('lang', False\n ) and self.get_backend_setting('lang').lower() in self._ACCEPTED_LANGS:\n params['lang'] = self.get_backend_setting('lang').lower()\n\n if 
self.get_backend_setting('onlinetransfer', False):\n params['onlinetransfer'] = 1\n if self.get_backend_setting('p_email', False):\n params['p_email'] = self.get_backend_setting('p_email')\n if self.get_backend_setting('p_info', False):\n params['p_info'] = self.get_backend_setting('p_info')\n if self.get_backend_setting('tax', False):\n params['tax'] = 1\n\n gateway_url = self.get_backend_setting('gateway_url', self._GATEWAY_URL)\n\n if self.get_backend_setting('method', 'get').lower() == 'post':\n return gateway_url, 'POST', params\n elif self.get_backend_setting('method', 'get').lower() == 'get':\n for key in params.keys():\n params[key] = six.text_type(params[key]).encode('utf-8')\n return gateway_url + '?' + urlencode(params), \"GET\", {}\n else:\n raise ImproperlyConfigured('Dotpay payment backend accepts only GET or POST')", "def event_payu_com_dpn(self, **post):\n cr, uid, context = request.cr, request.uid, request.context\n payment_acquire = request.env['payment.acquirer'].sudo().search([('provider', '=', 'payu')])\n transactionDetails = {}\n transactionDetails['store'] = {}\n transactionDetails['store']['soapUsername'] = payment_acquire.payu_api_username\n transactionDetails['store']['soapPassword'] = payment_acquire.payu_api_password\n transactionDetails['store']['safekey'] = payment_acquire.payu_seller_account\n transactionDetails['store']['environment'] = payment_acquire.environment\n transactionDetails['additionalInformation'] = {}\n transactionDetails['additionalInformation']['payUReference'] = post['PayUReference']\n try:\n result = PayuController.payuMeaGetTransactionApiCall('', transactionDetails)\n payment_transation_id = request.env['payment.transaction'].sudo().search(\n [('reference', '=', result['merchantReference'])])\n payu_response = {}\n if result:\n payu_response['TRANSACTION_STATUS'] = result['transactionState']\n # payu_response['SUCCESSFUL'] = result['successful']\n payu_response['AMOUNT'] = payment_transation_id.amount * 100 if payment_transation_id else 0.00\n payu_response['CURRENCYCODE'] = result['basket']['currencyCode']\n payu_response['PAYUREFERENCE'] = result['payUReference']\n payu_response['REFERENCE'] = result['merchantReference']\n payu_response['RESULTMESSAGE'] = result['resultMessage']\n response_state = request.env['payment.transaction'].sudo().form_feedback(payu_response, 'payu')\n # response_state = PaymentTransactionCus.form_feedback('', payu_response, 'payu')\n # if response_state:\n # return werkzeug.utils.redirect('/shop/payment/validate')\n # else:\n # return werkzeug.utils.redirect('/shop/unsuccessful')\n\n sale_order_id = request.env['sale.order'].sudo().search([('name', '=', result['merchantReference'])])\n sale_order_data = sale_order_id\n request.session['sale_last_order_id'] = sale_order_id.id\n\n tx_id = request.env['payment.transaction'].sudo().search([('reference', '=', result['merchantReference'])])\n tx = tx_id\n if not sale_order_id or (sale_order_id.amount_total and not tx):\n return request.redirect('/shop')\n if (not sale_order_id.amount_total and not tx) or tx.state in ['pending']:\n if sale_order_id.state in ['draft', 'sent']:\n if (not sale_order_id.amount_total and not tx):\n sale_order_id.action_button_confirm()\n email_act = sale_order_id.action_quotation_send()\n elif tx and tx.state == 'cancel':\n sale_order_id.action_cancel()\n elif tx and (tx.state == 'draft' or tx.state == 'sent' or tx.state == 'done'):\n # if result and payu_response['successful'] and payu_response['TRANSACTION_STATUS'] in ['SUCCESSFUL', 
'PARTIAL_PAYMENT', 'OVER_PAYMENT']:\n if result and payu_response['TRANSACTION_STATUS'] in ['SUCCESSFUL', 'PARTIAL_PAYMENT', 'OVER_PAYMENT']:\n transaction = tx.sudo().write(\n {'state': 'done', 'date_validate': datetime.now(),\n 'acquirer_reference': result['payUReference']})\n email_act = sale_order_id.action_quotation_send()\n action_confirm_res = sale_order_id.action_confirm()\n sale_order = sale_order_id.read([])\n # if sale_order_id.state == 'sale':\n # journal_ids = request.env['account.journal'].sudo().search([('name', '=', 'FNB 62085815143')], limit=1)\n # journal = journal_ids.read([])\n currency = request.env['res.currency'].sudo().search([('name', '=', 'ZAR')], limit=1)\n method = request.env['account.payment.method'].sudo().search([('name', '=', 'Manual')], limit=1)\n journal_id = request.env['account.journal'].sudo().search(\n [('name', '=', 'FNB - Cheque Account 6208585815143')], limit=1, order=\"id desc\")\n if journal_id:\n account_payment = {\n 'partner_id': sale_order[0]['partner_id'][0],\n 'partner_type': 'customer',\n 'journal_id': journal_id.id,\n # 'invoice_ids':[(4,inv_obj.id,0)],\n 'amount': sale_order[0]['amount_total'],\n 'communication': sale_order_id.name,\n 'currency_id': currency.id,\n 'payment_type': 'inbound',\n 'payment_method_id': method.id,\n 'payment_transaction_id': tx.id,\n }\n acc_payment = request.env['account.payment'].sudo().create(account_payment)\n acc_payment.sudo().post()\n sale_order_id = request.session.get('sale_last_order_id')\n print(\"\\n\\n\\n\\n\\n\\n=======================sale order sale order======\", sale_order_id)\n sale_order_data = request.env['sale.order'].sudo().browse(sale_order_id)\n # if sale_order_data.project_project_id:\n # request.session['last_project_id'] = sale_order_data.project_project_id.id\n if response_state:\n sale_order_data.message_post(subject=\"T&C's Privacy Policy\",\n body=\"%s accepted T&C's and Privacy Policy.\" % sale_order_data.partner_id.name)\n return werkzeug.utils.redirect('/pay/thankyou')\n # return werkzeug.utils.redirect('/shop/confirmation')\n else:\n return werkzeug.utils.redirect('/event/unsuccessful')\n except Exception as e:\n return werkzeug.utils.redirect('/event/unsuccessful')", "def create_payment(self, payment_request):\n data = payment_request.serialize()\n random_number = int(datetime.timestamp())\n\n headers = {\n 'app-id': self.app_id,\n 'Random': random_number,\n 'Hmac': self.generate_signature(random_number)\n }\n try:\n url = f'{self.base_url}/pos'\n response = requests.post(url, headers, data)\n if response != HTTPStatus.OK:\n raise Exception\n\n response_data = PaymentResponse.from_api_json(response.json())\n\n if response_data.is_success:\n raise OvoClientError(response_data.response_status)\n\n return response_data\n\n\n\n\n\n\n\n except Exception as exc:\n log.exception(f\"Failed to create new ovo payment for order {payment_request.reference_number}\")\n raise", "def cat_int_pay():\n print(colors.Color.BLUE + \"Make the payment with digital certificate\" + colors.Color.END)\n pay_and_certificate = urllib.parse.quote(\n 'identitats.aoc.cat/o/oauth2/auth?response_type=code&client_id=tramits.'\n 'transit.cat&redirect_uri=https'\n '://multestransit.gencat.cat/sctPagaments/AppJava/loginIdCat&scope='\n 'autenticacio_usuari&access_type=online'\n '&approval_pompt=false&state=ca_ES')\n print('https://' + pay_and_certificate)\n print(colors.Color.BLUE + \"Make the payment without digital certificate\"\n + colors.Color.END)\n pay_without_certificate = urllib.parse.quote(\n 
'multestransit.gencat.cat/sctPagaments/AppJava/views/expedients/cerca.'\n 'xhtml?set-locale=ca_ES')\n print('https://' + pay_without_certificate)", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n #3. Call PayPal to get the transaction\n response = self.client.execute(request)\n return response\n #4. Save the transaction in your database. Implement logic to save transaction to your database for future reference.", "def build_and_sign(builder, dest_address, payment_amount, prioritizer_seed=None):\n builder.append_payment_op(dest_address, str(payment_amount))\n builder.sign(builder.keypair.seed().decode())\n\n # prioritize transaction by adding a prioritizer signature\n if prioritizer_seed:\n builder.sign(prioritizer_seed)\n\n return builder.hash_hex(), builder.gen_xdr().decode()", "def send_request_to_bank(signed_request):\n\n node_address = format_address(\n ip_address='192.168.1.232',\n port=8000,\n protocol='http'\n )\n url = f'{node_address}/validator_confirmation_services'\n results = post(url=url, body=signed_request)\n\n if isinstance(results, dict):\n for k, v in results.items():\n print(f'{k}: {v}')\n\n print(results)\n\n write_json(\n os.path.join(SIGNED_REQUESTS_DIR, 'signed-validator-confirmation-services-response.json'),\n results\n )", "def build_request(self):\n self.build_header_2_40()\n self.build_fullprops()\n data_compressed = mcafee_crypto.mcafee_compress(self.agent_pubkey_epo_format + self.fullprops_xml)\n data_len = struct.pack('<I', len(data_compressed))\n final_header_len = struct.pack('<I', len(self.build_header_1()) + len(self.build_header_2_40()))\n self.build_header_1(final_header_len, data_len)\n final_header_1 = mcafee_crypto.xor_c(self.header_1)\n request_signature = mcafee_crypto.dsa_sign(self.regkey, self.header_1 + self.header_2 + data_compressed)\n data_encrypted = mcafee_crypto.mcafee_3des_encrypt(self.header_2 + data_compressed + request_signature)\n post_data = mcafee_crypto.xor_c(final_header_1) + data_encrypted\n return post_data", "def render_POST(self, request):\n log.msg(\"Paypal callback:\")\n log.msg(request.args)\n\n d = self.verify(request)\n d.addCallback(lambda ign: self._process(request.args))\n d.addErrback(log.err)\n return ''", "def _request(self, account, method, params, key):\n params_bytes = py23_bytes(json.dumps(params), self.ENCODING)\n params_enc = base64.b64encode(params_bytes).decode(self.ENCODING)\n timestamp = datetime.utcnow().strftime(self.TIMEFORMAT)[:-3] + \"Z\"\n nonce_int = random.getrandbits(64)\n nonce_bytes = struct.pack('>Q', nonce_int) # 64bit ULL, big endian\n nonce_str = \"%016x\" % (nonce_int)\n\n message = self.prehash_message(timestamp, account, method,\n params_enc, nonce_bytes)\n signature = sign_message(message, key)\n signature_hex = hexlify(signature).decode(self.ENCODING)\n\n request = {\n \"jsonrpc\": \"2.0\",\n \"id\": self.id,\n \"method\": method,\n \"params\": {\n \"__signed\": {\n \"account\": account,\n \"nonce\": nonce_str,\n \"params\": params_enc,\n \"signatures\": [signature_hex],\n \"timestamp\": timestamp\n }\n }\n }\n r = requests.post(self.url, data=json.dumps(request))\n self.id += 1\n return r.json()", "def create(**data):\n http_client = HttpClient()\n response, _ = http_client.post(routes.url(routes.PAYMENT_RESOURCE), data)\n return resources.Payment(**response)", "def __init__( self, payment, token, payment_action, payer_id ):\n\n if not isinstance(payment, fields.Payment ):\n raise ValueError( \n 'payment must be an instance of class <Payment>.' 
)\n\n if (token is None) or (len(token) != 20):\n raise ValueError( 'Invalid token argument' )\n\n if payment_action not in ['Sale','Authorization','Order']:\n raise ValueError( \n 'payment_action must be Sale, Authorization or Order.' )\n\n if (payer_id is None) or (len(payer_id) != 13):\n raise ValueError( 'Invalid payer id' )\n\n self._nvp_response = dict()\n self._nvp_request = dict()\n self._nvp_request['METHOD'] = 'DoExpressCheckoutPayment'\n \n nvp = copy.deepcopy( payment.get_nvp_request() )\n self._nvp_request.update( nvp )\n self._nvp_request['TOKEN'] = token\n self._nvp_request['PAYMENTACTION'] = payment_action\n self._nvp_request['PAYERID'] = payer_id", "def build_minimum_request_body():\n return \\\n {\n \"intent\": \"AUTHORIZE\",\n \"application_context\": {\n \"return_url\": \"https://www.example.com\",\n \"cancel_url\": \"https://www.example.com\"\n },\n \"purchase_units\": [\n {\n \"amount\": {\n \"currency_code\": \"USD\",\n \"value\": \"220.00\"\n }\n }\n ]\n }" ]
[ "0.6602312", "0.6058661", "0.60280204", "0.5872931", "0.5825171", "0.5807506", "0.56765485", "0.56448203", "0.56400335", "0.55968016", "0.55442744", "0.54821205", "0.54804116", "0.5471226", "0.5407815", "0.54046965", "0.5371241", "0.53533983", "0.5271067", "0.5269766", "0.52589035", "0.52541524", "0.52244174", "0.52159595", "0.51965517", "0.5173456", "0.5149333", "0.5135844", "0.5126612", "0.5125812" ]
0.6259245
1
Utility method to retrieve the payKey from a PayPal response
def get_pay_key(response): return response.get("payKey")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_api_key_from_response(response: requests.models.Response) -> str:\n api_key = None\n for line in response.text.splitlines():\n if \"Your API Key is: \" in line:\n api_key = line.split(\"Your API Key is: \")[1].split(\"<\")[0]\n return api_key\n raise ValueError(\"Cannot find API key\")", "def click_payment_key(key, timeout=default_timeout):\n return click_key(PAYMENT_KEYS[key], timeout)", "def pay_info(self) -> str:\n return pulumi.get(self, \"pay_info\")", "def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')", "def getRetKey(dictionary):\n retKey = \"\"\n try:\n if dictionary:\n retKey = dictionary.values()[0].keys()[0]\n except TypeError:\n logging.debug(\"type error\")\n\n return retKey", "def get_license_key(self):\n\t\treturn call_sdk_function('PrlLic_GetLicenseKey', self.handle)", "def response_usage_key(self, response):\r\n parsed = json.loads(response.content)\r\n self.assertEqual(response.status_code, 200)\r\n return UsageKey.from_string(parsed['locator'])", "def verify_response_dict(api_key, response):\n LOGGER.debug('Verifying WSAPI response signature')\n\n # Remove signature from the response\n r = dict(response)\n del r['h']\n\n # Convert to HTML query as that is used by Yubico to sign the response\n query = sorted_urlencode(list(r.iteritems()))\n\n # We unquote it because it's not the HTTP quoted version\n query = urllib.unquote_plus(query)\n\n status = sign(api_key, query) == response['h']\n LOGGER.debug('Signature result ' + str(status))\n return status", "def get_private_key(self, uid: str) -> str:\n return self.context.get(\n \"/dsum/private_key/%s\" % uid, None, \"DSum: failed retrieving the Curve 25519 private key with uid: %s\" % uid)['key']", "def get(id):\n repo = KeyRepository(getDb())\n try:\n key = repo.findOne(id)\n except DBException:\n return {'message': 'Key id is invalid'}, 400\n\n if key == None:\n return {'message': 'Key has not been found'}, 404\n else:\n return base64.b64decode(key.publicKey)", "def get_payment_given_pay_id(self, payment_id):\n # Verify user\n this_pay = self.get_a_payment(payment_id)\n loan_id = None\n valid_user = None\n if this_pay:\n loan_id = this_pay[\"loan_id\"]\n if not this_pay:\n return self.return_data(404, \"NOT FOUND\", {})\n if loan_id:\n valid_user = self.check_access(payment_id)\n if valid_user:\n this_pay[\"amount_paid\"] = str(this_pay[\"amount_paid\"])\n return self.return_data(200, \"SUCCESS\", this_pay) \n return self.return_data(401, \"UNAUTHORIZED\", {})", "def _get_usage_key(self, resp):\r\n usage_key_string = json.loads(resp.content).get('locator')\r\n return UsageKey.from_string(usage_key_string)", "def payment_verification(payload):\n response = requests.post(url, data=payload)\n return response.json()", "def ssl_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"ssl_key\")", "def payPalReturn(request, *args, **kwargs):\n initParam = {}\n token = request.GET.get('token')\n payerID = request.GET.get('PayerID')\n initParam['token'] = token\n initParam['payerid'] = payerID\n if token and payerID:\n p = driver.PayPal()\n EC_RETURNURL = '/'.join([common.getHttpHeader(request), 'payment/paypal_return'])\n EC_CANCELURL = '/'.join([common.getHttpHeader(request), 'payment/paypal_cancel'])\n res_dict = p.GetExpressCheckoutDetailsInfo(EC_RETURNURL, EC_CANCELURL, token)\n state = p._get_value_from_qs(res_dict, 'ACK')\n if state in [\"Success\", \"SuccessWithWarning\"]:\n #Show the list of service detail to user.\n executeMethod = 
kwargs.pop('executeMethod', None)\n if executeMethod:\n gateway = request.session.get('gateway', None)\n if gateway:\n initParam['gateway'] = gateway\n serviceDetail, serviceItems, discount_rate = executeMethod(request, initParam=initParam)\n if serviceDetail and serviceItems:\n initParam['serviceDetail'] = serviceDetail\n initParam['serviceItems'] = serviceItems\n initParam['discount_rate'] = discount_rate\n return render_to_response('payment/paypal_return.html', initParam, context_instance=RequestContext(request))\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, Execute method %(param3)s failed.')\n % {'param1': token, 'param2': payerID, 'param3': executeMethod.__name__})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s. Gateway no exists in request.session.')\n % {'param1': token, 'param2': payerID})\n else:\n log.error(_('Token %(param1)s, PayerID: %(param2)s, ExecuteMethod does not exist.')\n % {'param1': token, 'param2': payerID})\n else:\n error = p._get_value_from_qs(res_dict, 'L_SHORTMESSAGE0')\n log.error(_('Token %(param1)s, PayerID: %(param2)s, %(param3)s.')\n % {'param1': token, 'param2': payerID, 'param3': error})\n else:\n log.error(_('Token or PayerID no exists.'))\n\n if request.session.get('gateway', None):\n del request.session['gateway']\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please payment again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))", "def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")", "def private_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"private_key\")", "def get(self, payment):\n return payment", "def _get_key(self):\n if not self.session:\n key = self.key\n else:\n key = self.session.get(\"_signature_key\")\n if key is None:\n key = str(uuid.uuid1())\n self.session[\"_signature_key\"] = key\n return key", "def email_key(self):\r\n url = '{0}/emailKey/generate'.format(self.get_url())\r\n request = http.Request('POST', url)\r\n return request, parsers.parse_json", "def access_key(self) -> Optional['outputs.AsymmetricEncryptedSecretResponse']:\n return pulumi.get(self, \"access_key\")", "def get_crypt_key():\n\n get_crypt_query = 'SELECT crypt.crypt_key ' \\\n 'FROM crypt ' \\\n 'WHERE key_id = 1'\n\n my_cursor.execute(get_crypt_query)\n stored_key = my_cursor.fetchone()\n\n # 'fetchone()' returns a union or tuple. 
To get the key, we take the first value:\n stored_key = stored_key[0]\n return stored_key", "def get_coin_privkey(self, coin, idx=0):\n return self.get_coin_address(coin=coin, idx=idx)[\"privkey\"]", "def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def payReturn(request, *args, **kwargs):\n initParam = {}\n pay_key = request.session.get('pay_key', None)\n gateway = request.session.get('gateway', None)\n if pay_key and gateway:\n del request.session['pay_key']\n del request.session['gateway']\n #Check and get Transaction information\n checkMethod = kwargs.pop('checkMethod', None)\n if checkMethod:\n initParam['pay_key'] = pay_key\n initParam['gateway'] = gateway\n transaction = checkMethod(request, initParam=initParam)\n if transaction:\n p = driver.PayPal()\n #Check whether use has paid successfully.\n result = p.check_ap_payment_status(transaction.pay_key)\n if result['status'][0] == 'COMPLETED':\n #Do something after user payed successfully.\n executeMethod = kwargs.pop('executeMethod', None)\n if executeMethod:\n initParam['transaction_id'] = transaction.id\n initParam['buyer_account'] = result['senderEmail'][0]\n if executeMethod(initParam=initParam):\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if back_page:\n del request.session['back_page']\n if success_page:\n del request.session['success_page']\n initParam['success_page'] = success_page\n initParam['success_page_msg'] = request.session['success_page_msg']\n #For the value in paypal_success.html\n initParam['app'] = transaction.app\n initParam['price'] = transaction.price\n initParam['type'] = 'Transaction'\n initParam['msg'] = _('You have successfully paid the money. We have already sent an email to the app seller. In the meanwhile you can send private message to seller as well.')\n log.info(_('User %(param1)s has paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n return render_to_response(\"payment/paypal_success.html\", initParam, context_instance=RequestContext(request))\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but execute method %(param3)s failed.')\n % {'param1': request.user.username, 'param2': transaction.id, 'param3': executeMethod.__name__})\n else:\n log.error(_('User %(param1)s has paid with transaction id %(param2)s, but ExecuteMethod does not exist.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('User %(param1)s has no paid with transaction id %(param2)s.')\n % {'param1': request.user.username, 'param2': transaction.id})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, User: %(param3)s, Execute method %(param4)s failed.')\n % {'param1': pay_key, 'param2': gateway, 'param3': request.user.username, 'param4': checkMethod.__name__})\n else:\n log.error(_('PayKey %(param1)s, Gateway: %(param2)s, CheckMethod does not exist.')\n % {'param1': pay_key, 'param2': gateway})\n else:\n log.error(_('Pay. 
PayKey or Gateway no exists.'))\n\n success_page = request.session.get('success_page', None)\n back_page = request.session.get('back_page', None)\n if success_page:\n del request.session['success_page']\n if back_page:\n del request.session['back_page']\n error_msg = driver.GENERIC_PAYPAL_ERROR\n page_msg = request.session['back_page_msg']\n return render_to_response('payment/paypal_cancel.html',\n {'error_msg': error_msg, 'back_page': back_page, 'back_page_msg': page_msg}, context_instance=RequestContext(request))\n else:\n error_msg = _('%(param1)s Please transaction again.') % {'param1': driver.GENERIC_PAYPAL_ERROR}\n return render_to_response('payment/paypal_error.html',\n {\"error_msg\": error_msg}, context_instance=RequestContext(request))", "def access_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"access_key\")", "def get_api_key(self, email: str, password: str) -> json:\n\n headers = {\n 'email': email,\n 'password': password\n }\n res = requests.get(self.base_url + 'api/key', headers=headers)\n status = res.status_code\n result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def confirm_transaction_response(response) -> dict:\n result = {}\n root = ET.fromstring(response)\n namespace_ = {\n \"SOAP-ENV\": \"http://schemas.xmlsoap.org/soap/envelope/\",\n \"ns1\": \"tns:ns\"}\n for child in root.findall(\"SOAP-ENV:Body\", namespace_):\n checkout_element = child.find(\n \"ns1:transactionConfirmResponse\", namespace_)\n result[\"status_code\"] = checkout_element.find(\"RETURN_CODE\").text\n result[\"desc\"] = checkout_element.find(\"DESCRIPTION\").text\n result[\"trans_id\"] = checkout_element.find(\"TRX_ID\").text\n result[\"merchant_trans_id\"] = checkout_element.find(\n \"MERCHANT_TRANSACTION_ID\").text\n\n return result", "def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key\")" ]
[ "0.59357", "0.57256216", "0.5548928", "0.5383697", "0.53185004", "0.52857995", "0.5276171", "0.5259969", "0.5204575", "0.5185268", "0.5182574", "0.51728517", "0.5126173", "0.51261204", "0.51226956", "0.5106314", "0.5106314", "0.50934404", "0.5050427", "0.50257576", "0.4965156", "0.4957235", "0.49357206", "0.49211797", "0.49152824", "0.49000782", "0.48920712", "0.48867658", "0.48850912", "0.48850912" ]
0.8757863
0
Utility method to retrieve the list of errors (if any) from a PayPal response.
def get_errors(response):
    errors = response.get("error")
    if errors:
        return [e.get("message") for e in errors]
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_errors(self, response: response_domain_model.Response, question_code: str) -> Sequence['ValidationError']:\n ...", "def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []", "def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors", "def getErrorsList(self):\n return self.__errors", "def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report", "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def get_error(self) -> List[str]:\n return []", "def get_error(self) -> List[str]:\n return []", "def errors(self) -> List[Error]:", "def _parse_store_error(self, response):\n default_msg = \"Failure working with the Store: [{}] {!r}\".format(\n response.status_code, response.content\n )\n try:\n error_data = response.json()\n except ValueError:\n return default_msg\n\n try:\n error_info = [(error[\"message\"], error[\"code\"]) for error in error_data[\"error-list\"]]\n except (KeyError, TypeError):\n return default_msg\n\n if not error_info:\n return default_msg\n\n messages = []\n for msg, code in error_info:\n if code:\n msg += \" [code: {}]\".format(code)\n messages.append(msg)\n return \"Store failure! 
\" + \"; \".join(messages)", "def getErrors(self):\n return self.errors", "def error_data(self):\n\n if not self.__settings:\n return []\n\n return self.__transaction_errors", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def error_wrapper(x):\n errors = list()\n for error_key, error_list in list(x.items()):\n for error in error_list:\n if error_key == 'non_field_errors':\n errors.append(error)\n else:\n errors.append(\"%s: %s\" % (error_key, error))\n return errors", "def _find_errors_in_page(self, response):\n if response.status_code == 403:\n return \"Could not check for errors, as response was a 403 response\\\n forbidden. User asking for this url did not have permission.\"\n \n \n errors = re.search('<ul class=\"errorlist\">(.*)</ul>', \n response.content, \n re.IGNORECASE)\n\n if errors: \n #show a little around the actual error to scan for variables that\n # might have caused it\n span = errors.span()\n wide_start = max(span[0]-200,0)\n wide_end = min(span[1]+200,len(response.content)) \n wide_error = response.content[wide_start:wide_end]\n return wide_error\n \n return \"\"", "def getErrors(self) -> java.util.Collection:\n ...", "def Errors(self):\r\n\t\treturn self._get_attribute('errors')", "def GetAll(self):\n return self._errors.copy()", "def errors(self):\n return self._errors", "def check_set_errors(self):\n response = self.read()\n return [] if response == \"\" else [response]", "def errors (self):\n return self._errors", "def errors (self):\n return self._errors", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def _get_error_response_data(response_list: list[Any]) -> ErrorResponseData:\n error_code = None\n reason = None\n if len(response_list) > 1:\n error_code = response_list[1]\n if len(response_list) > 2:\n reason = response_list[2]\n\n return ErrorResponseData(\n error_code=error_code,\n reason=reason,\n )", "def extract_form_errors(html):\n errors = re.findall('<ul class=\"errorlist\"(.*)</ul>',\n html,\n re.IGNORECASE)\n \n return errors", "def errors(self):\n return self._properties.get(\"errors\")", "def errors(self):\n return self.__errors", "def getParseErrors(self):\n return [x for x in self.xeps if x.parseErrors]", "def Errcheck(self) -> list:\n\n myError = []\n\n ErrorList = self.myFieldFox.query(\"SYST:ERR?\").split(',')\n\n Error = ErrorList[0]\n\n if int(Error) == 0:\n\n print (\"+0, No Error!\")\n\n else:\n\n while int(Error)!=0:\n\n print (\"Error #: \" + ErrorList[0])\n\n print (\"Error Description: \" + ErrorList[1])\n\n myError.append(ErrorList[0])\n\n myError.append(ErrorList[1])\n\n ErrorList = self.myFieldFox.query(\"SYST:ERR?\").split(',')\n\n Error = ErrorList[0]\n\n myError = list(myError)\n\n return myError", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")" ]
[ "0.6916876", "0.68916273", "0.65724224", "0.6497242", "0.64410913", "0.64169973", "0.6369101", "0.6369101", "0.6323569", "0.6281489", "0.6272857", "0.62684876", "0.6221662", "0.6196093", "0.61649644", "0.6146879", "0.6127754", "0.6113122", "0.6102733", "0.6091104", "0.604821", "0.604821", "0.6032954", "0.60207605", "0.6000429", "0.5983989", "0.5981795", "0.5973551", "0.59720445", "0.5961717" ]
0.80022
0
Running evaluation on the test set, appending results to a submission.
def evaluate(model, dataset, append_submission, dataset_root):
    with open(os.path.join(dataset_root, dataset + '.json'), 'r') as f:
        image_list = json.load(f)
    print('Running evaluation on {} set...'.format(dataset))
    count_img = 0
    for img in image_list:
        img_path = os.path.join(dataset_root, 'images', dataset, img['filename'])
        pil_img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(pil_img)
        x = preprocess_input(x)
        x = np.expand_dims(x, 0)
        output = model.predict(x)
        sys.stdout.write('\r' + str(count_img / len(image_list)) + ' ')
        sys.stdout.flush()
        append_submission(img['filename'], output[0, :4], output[0, 4:])
        count_img += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def evaluate_all_submissions(root_dir, gt_dir, skip_evaluated=False):\n # all submission directory has a 'dt_txts'\n sub_id_dir_pairs = find_submissions(root_dir, 'dt_txts')\n\n for identifier, sub_dir in sub_id_dir_pairs:\n print('Found submission %8s in directory %s' % (identifier, sub_dir))\n n_submissions = len(sub_id_dir_pairs)\n\n # evaluate all submissions\n for i, pair in enumerate(sub_id_dir_pairs):\n identifier, sub_dir = pair\n if skip_evaluated and exists(join(sub_dir, 'eval_results', 'eval_data.pkl')):\n print('Skip %s' % identifier)\n else:\n print('[%2d/%2d] Start evaluating \"%s\" at directory %s' % (i+1, n_submissions, identifier, sub_dir))\n det_eval(gt_dir, join(sub_dir, 'dt_txts'), join(sub_dir, 'eval_results'))", "def test_tests():\n submission = SubmissionBuilder(\"t\", \"b\", [\"anything\"]).build()\n assert submission.get(\"results\") == [\"anything\"], submission", "def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()", "def run(self, config, **kwargs):\n config_parameters = utils.parse_config_or_kwargs(config, **kwargs)\n experiment_path = self.train(config, **kwargs)\n evaluation_logger = utils.getfile_outlogger(\n Path(experiment_path, 'evaluation.log'))\n for testdata, testlabel in zip(config_parameters['testdata'],\n config_parameters['testlabel']):\n evaluation_logger.info(\n f'Evaluting {testdata} with {testlabel} in {experiment_path}')\n # Scores for later evaluation\n scores_file = Path(experiment_path,\n 'scores_' + Path(testdata).stem + '.tsv')\n evaluation_result_file = Path(\n experiment_path) / 'evaluation_{}.txt'.format(\n Path(testdata).stem)\n self.score(experiment_path,\n result_file=scores_file,\n label=testlabel,\n data=testdata)\n self.evaluate_eer(scores_file,\n ground_truth_file=testlabel,\n evaluation_res_file=evaluation_result_file)", "def runtestsuite(self, testsuite):\n if testsuite.status == TestStatus.READY:\n results = testsuite.run()\n else:\n results = ResultList()\n # Disable \"Expression is assigned to nothing\" warning\n # pylint: disable=W0106\n [handler.flush() for handler in self.logger.handlers]\n results.save(heads={'Build': '', 'Branch': self.args.branch})\n sys.stdout.flush()\n self._cleanup_resourceprovider()\n return results", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def run_tests():\n with open(FILENAME) as file:\n # Loads testing parameters from the yaml file.\n tests = yaml.safe_load(file)\n\n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results['Last Average Score'] = \"\"\n results['No of Q-Learning episodes'] = \"\"\n\n # run experiments:\n for i, test in enumerate(test_dict):\n grid = Rooms(test[\"env_size\"], testing=True)\n learning = QLearning(grid, test[\"gamma\"], test[\"alpha\"], test[\"agent_start_pos\"])\n e_greedy = Policy(\"e-greedy\", test[\"epsilon\"], test[\"decay\"])\n greedy = Policy(policy_type=\"greedy\")\n experiment = Experiments(grid, learning, greedy, test[\"iters\"],\n test[\"agent_start_pos\"], test[\"test_no\"])\n\n for session in range(test[\"iters\"]):\n learning.run_multiple_episodes(test[\"batch_episodes\"], e_greedy)\n mean_reward = experiment.run_experiments(test[\"exp_per_batch\"])\n\n results.loc[i,'Last Average Score'] = mean_reward\n results.loc[i,'No of Q-Learning episodes'] = (session + 1) * test[\"batch_episodes\"]\n\n # save results to 
csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n # plot & save graphs\n experiment.generate_results(test[\"test_no\"], test)\n\n return results", "def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)", "def evaluate_batch(self, pipelines):", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test\n # set.\n return list(map(self.classify, test))", "def evaluate_model(self, predictions, expected, bypass_data_to_eval):\n\n result = []\n for i, unique_id in enumerate(np.squeeze(expected[\"unique_ids\"])):\n start_logits = predictions['tf_electra_for_question_answering'][i]\n start_top_index = predictions['tf_electra_for_question_answering_1'\n ][i]\n end_logits = predictions['tf_electra_for_question_answering_2'][i]\n end_top_index = predictions['tf_electra_for_question_answering_3'][i\n ]\n cls_logits = predictions['tf_electra_for_question_answering_4'][i]\n\n result.append(\n SquadResult(\n unique_id,\n start_logits.tolist(),\n end_logits.tolist(),\n start_top_index=start_top_index.tolist(),\n end_top_index=end_top_index.tolist(),\n cls_logits=cls_logits.tolist(),\n )\n )\n\n dev_features = bypass_data_to_eval[\"dev_features\"]\n dev_examples = bypass_data_to_eval[\"dev_examples\"]\n\n answers, nbest_answers = get_answers(\n dev_examples, dev_features, result, self._args\n )\n\n output_prediction_file = os.path.join(\n self._args.output_dir, \"predictions.json\"\n )\n output_nbest_file = os.path.join(\n self._args.output_dir, \"nbest_predictions.json\"\n )\n\n with open(output_prediction_file, \"w\") as f:\n f.write(json.dumps(answers, indent=4) + \"\\n\")\n with open(output_nbest_file, \"w\") as f:\n f.write(json.dumps(nbest_answers, indent=4) + \"\\n\")\n\n if self._args.version_2_with_negative:\n dev_file = \"dev-v2.0.json\"\n eval_file = \"evaluate-v2.0.py\"\n else:\n dev_file = \"dev-v1.1.json\"\n eval_file = \"evaluate-v1.1.py\"\n\n command_str = (\n f\"{sys.executable} {os.path.join(self._args.data_dir, eval_file)} \"\n f\"{os.path.join(self._args.data_dir, dev_file)} \"\n f\"{output_prediction_file}\"\n )\n\n logging.debug(f\"\\nExecuting: `{command_str}`\\n\")\n\n eval_out = subprocess.check_output(shlex.split(command_str))\n\n # scores: {'exact_match': 87.06717123935667, 'f1': 92.78048326711645}\n scores = json.loads(eval_out.decode(\"UTF-8\").strip())\n\n logging.debug(\"scores:\", scores)\n\n metric_units = \"f1\"\n\n return scores[metric_units], metric_units", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def step(self) -> ResultDict:\n # Do we have to run `self.evaluate()` this iteration?\n # `self.iteration` gets incremented after this function returns,\n # meaning that e. g. 
the first time this function is called,\n # self.iteration will be 0.\n evaluate_this_iter = (\n self.config.evaluation_interval is not None\n and (self.iteration + 1) % self.config.evaluation_interval == 0\n )\n\n # Results dict for training (and if appolicable: evaluation).\n results: ResultDict = {}\n\n # Parallel eval + training: Kick off evaluation-loop and parallel train() call.\n if evaluate_this_iter and self.config.evaluation_parallel_to_training:\n (\n results,\n train_iter_ctx,\n ) = self._run_one_training_iteration_and_evaluation_in_parallel()\n # - No evaluation necessary, just run the next training iteration.\n # - We have to evaluate in this training iteration, but no parallelism ->\n # evaluate after the training iteration is entirely done.\n else:\n results, train_iter_ctx = self._run_one_training_iteration()\n\n # Sequential: Train (already done above), then evaluate.\n if evaluate_this_iter and not self.config.evaluation_parallel_to_training:\n results.update(self._run_one_evaluation(train_future=None))\n\n # Attach latest available evaluation results to train results,\n # if necessary.\n if not evaluate_this_iter and self.config.always_attach_evaluation_results:\n assert isinstance(\n self.evaluation_metrics, dict\n ), \"Algorithm.evaluate() needs to return a dict.\"\n results.update(self.evaluation_metrics)\n\n if hasattr(self, \"workers\") and isinstance(self.workers, WorkerSet):\n # Sync filters on workers.\n self._sync_filters_if_needed(\n central_worker=self.workers.local_worker(),\n workers=self.workers,\n config=self.config,\n )\n # TODO (avnishn): Remove the execution plan API by q1 2023\n # Collect worker metrics and add combine them with `results`.\n if self.config._disable_execution_plan_api:\n episodes_this_iter = collect_episodes(\n self.workers,\n self._remote_worker_ids_for_metrics(),\n timeout_seconds=self.config.metrics_episode_collection_timeout_s,\n )\n results = self._compile_iteration_results(\n episodes_this_iter=episodes_this_iter,\n step_ctx=train_iter_ctx,\n iteration_results=results,\n )\n\n # Check `env_task_fn` for possible update of the env's task.\n if self.config.env_task_fn is not None:\n if not callable(self.config.env_task_fn):\n raise ValueError(\n \"`env_task_fn` must be None or a callable taking \"\n \"[train_results, env, env_ctx] as args!\"\n )\n\n def fn(env, env_context, task_fn):\n new_task = task_fn(results, env, env_context)\n cur_task = env.get_task()\n if cur_task != new_task:\n env.set_task(new_task)\n\n fn = functools.partial(fn, task_fn=self.config.env_task_fn)\n self.workers.foreach_env_with_context(fn)\n\n return results", "def evaluate(self, test=None):\n if test is None:\n test = self.testSet.input\n # Once you can classify an instance, just use map for all of the test set.\n return list(map(self.classify, test))", "def train_and_evaluate(name, model, train, test, evaluation, final_eval, output_dir):\n\n print(\"---\" * 5)\n print(\"Running pipeline for {}\".format(name))\n\n plot_dir = os.path.join(output_dir, \"plots\")\n\n pipeline = make_pipeline(model)\n\n X_train, y_train = train.drop(\n [\"PM10\"], axis=1).values, train[\"PM10\"].values\n X_test, y_test = test.drop([\"PM10\"], axis=1).values, test[\"PM10\"].values\n X_eval, y_eval = evaluation.drop(\n [\"PM10\"], axis=1).values, evaluation[\"PM10\"].values\n X_final, y_final = final_eval.drop(\n [\"PM10\"], axis=1), final_eval[\"PM10\"].values\n\n # first round - fit on train, predict on test\n print(\"Fitting pipeline on train data\")\n pipeline.fit(X_train, 
y_train)\n yhat = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_test, yhat, title=\"{} - Predicted vs. Actual on Test\".format(name), output_dir=plot_dir)\n\n # second round - fit on train + test, predict on evaluation\n X_train = np.concatenate([X_train, X_test])\n y_train = np.concatenate([y_train, y_test])\n print(\"Fitting pipeline on train + test data\")\n pipeline.fit(X_train,y_train)\n yhat = pipeline.predict(X_eval)\n mae = mean_absolute_error(y_eval,yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(y_eval,yhat,title=\"{} - Predicted vs. Actual on Evaluation\".format(name),output_dir=plot_dir)\n\n # final round - fit on last X hours, by which the actual score will be measured\n X_train = np.concatenate([X_train, X_eval])\n y_train = np.concatenate([y_train, y_eval])\n print(\"Fitting pipeline on all \\\"all available data\\\"\")\n pipeline.fit(X_train, y_train)\n yhat = pipeline.predict(X_final)\n mae = mean_absolute_error(y_final, yhat)\n print(\"MAE: {}\".format(mae))\n plot_predictions(\n y_final, yhat, title=\"{} - Predicted vs. Actual\".format(name), output_dir=plot_dir)\n\n # save the model\n joblib.dump(model, os.path.join(\n output_dir, \"models\", \"{}.joblib\".format(name)))\n\n return yhat, mae", "def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! 
stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures", "def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics", "def run(self, worker, evaluator=None):\n pass", "def print_eval(trainset, testset, exptypes=EXPTYPES, semantic=False, savemodels=False, loadmodels=False, deprep=False, externals=True, predict=True):\n system_pairs = []\n print \"== cleaning lsts ==\"\n cleanupnonespanexpressions(testset)\n cleanholdercandidates(testset)\n cleanholders(testset)\n cleanupnonespanexpressions(trainset)\n cleanholdercandidates(trainset)\n cleanholders(trainset)\n \n print \"== train ==\"\n ev = evaluate()\n features, labels, stats = getfeaturesandlabels(trainset, semantic=semantic, predict=False)\n print counters, '\\n'\n\n print \"== test ==\"\n counters.clear()\n ftest, ltest, stest = getfeaturesandlabels(testset, semantic=semantic, predict=predict)\n print counters\n for exp in exptypes:\n vec, X, y = create_matrix(features[exp], labels[exp])\n if externals:\n vecw, Xw, yw = create_matrix(features[exp + 'w'], labels[exp + 'w'])\n vecimp, Ximp, yimp = create_matrix(features[exp + 'w'], labels[exp + 'implicit'])\n if loadmodels:\n clf = read_model(loadmodels + exp)\n else:\n clf = create_model(X, y)\n if externals:\n clfw = create_model(Xw, yw)\n clfimp = create_model(Ximp, yimp)\n if savemodels:\n write_model(clf, savemodels + exp)\n print \"== eval ==\"\n if deprep:\n print \"== {} ==\".format(deprep)\n Xt, yt = transform_to_matrix(ftest[exp], ltest[exp], vec)\n if externals:\n Xtw, ytw = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'w'], vecw)\n Xtimp, ytimp = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'implicit'], vecimp)\n results = clf.predict_proba(Xt)\n s_p_w = False\n s_p_imp = False\n gold_p1 = ev.get_unique_exp(copy.deepcopy(stest['positions'][exp + 'w']), exp, count=False)\n gold_p2 = copy.deepcopy(gold_p1)\n gold_p3 = copy.deepcopy(gold_p1)\n if clfw:\n resultsw = 
clfw.predict_proba(Xtw)\n s_p_w=ev.get_system_pairs_prob(stest['positions'][exp + 'w'], resultsw, gold_p1)\n counters['s_p_w' + exp] = len(s_p_w)\n if DEBUG:\n print \"RESULTSW\"\n print resultsw\n if clfimp:\n resultsimp = clfimp.predict_proba(Xtimp)\n s_p_imp=ev.get_system_pairs_prob(stest['positions'][exp + 'implicit'], resultsimp, gold_p2)\n counters['s_p_imp' + exp] = len(s_p_imp)\n if DEBUG:\n print \"RESULTSIMP\"\n print resultsimp\n s_p_int=ev.get_system_pairs_prob(stest['positions'][exp], results, gold_p3)\n counters['s_p_int' + exp] = len(s_p_int)\n system_pairs_exp = ev.merge_system_pairs(s_p_int, s_p_imp=s_p_imp, s_p_w=s_p_w)\n counters['system_pairs_all' + exp] = len(system_pairs_exp)\n for pair in system_pairs_exp:\n if 'confidence' in pair and pair['confidence'] > 0:\n counters['system_pairs' + exp] += 1\n if predict:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"system exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n else:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"gold exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n system_pairs.extend(system_pairs_exp)\n if predict:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"system exp - all:\\n\", prf_prettystring(ssc)\n else:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"gold exp - all: \\n\", prf_prettystring(ssc)\n \n for k,v in sorted(counters.items(), key=lambda x: x[0]):\n print k, v\n if isinstance(deprep, basestring):\n dump_jsonfile(system_pairs, 'system_pairs-' + deprep + '.json')\n return {'stats': stest, 'system_pairs': system_pairs}", "def evaluate(self, test):\n self.logger.info(\"Testing model over test set\")\n metrics = self.run_evaluate(test)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n return metrics", "def evaluate(self):\n try:\n self._evaluate()\n except Exception as e:\n if str(e) == \"assignment destination is read-only\":\n log.exception(\n \"Encountered error during scenario evaluation. 
Be sure \"\n + \"that the classifier's predict() isn't directly modifying the \"\n + \"input variable itself, as this can cause unexpected behavior in ART.\"\n )\n else:\n log.exception(\"Encountered error during scenario evaluation.\")\n sys.exit(1)\n\n if self.results is None:\n log.warning(f\"{self._evaluate} did not set self.results to a dict\")\n\n self.save()", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n print('*'*20, '\\n')\n print('precision:', p, 'recall:', r, 'f1:', f1, '\\n')\n print('*'*20)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def evaluate(self) -> None:\n eval_results = {'segmentation': self.evaluate_segmentation()}\n if self.task == 'tracking':\n eval_results['tracking'] = self.evaluate_tracking()\n self.save_result(eval_results)", "def evaluate(self, runs=100):\n score_record = []\n \n print('Evaluation in progress...')\n for i in range(runs):\n score = self.run_evaluation_episode()\n score_record.append(score)\n \n ave_score = np.mean(score_record)\n \n print('System evaluated with an average score of {} in {} runs'.format(ave_score, runs))", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def run(self):\n if self.verbose:\n print(f'Running {self.name} tests...')\n\n # try running setup if there is one\n if self.setup:\n self.__process_setup()\n\n final_report = [None] * len(self.tests)\n\n for test_in, test_out in sorted(self.tests.items()):\n # increment total num of tests\n self.total += 1\n\n if self.verbose:\n print(f'#{self.total}')\n\n # evaluate test input w/ setup vars, if any\n try:\n inp = eval(test_in, self.vars)\n except Exception as err:\n print(f'Issue during evaluation of test input: {err}')\n final_report[self.total - 1] = 'input eval error'\n if self.verbose:\n print(f'Test input was: {test_in}')\n print('Vars from execution: {}'.format({k : v for k, v in self.vars.items() if k != '__builtins__'}))\n continue\n\n \n # checking if function input has more than one arg\n if type(inp) in (list, tuple):\n try:\n student_out = self.student_function(*inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Inputs were: {inp}')\n continue\n else:\n try:\n student_out = self.student_function(inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Input was: {inp}')\n continue\n\n # ans alias for ease of answer checking\n self.vars['ans'] = student_out\n\n if self.schema:\n format_vals = eval(test_out, self.vars)\n results, maybe_failed_schema = self.__process_schema(format_vals)\n if all(results):\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed at least one of the tests\n failed_str = \" and \".join([\", \".join(maybe_failed_schema[:-1]),maybe_failed_schema[-1]] if len(maybe_failed_schema) > 2 else maybe_failed_schema)\n final_report[self.total - 1] = f'FAILED; failed following assertion(s): {failed_str}'\n else:\n expected_ans = eval(test_out, self.vars)\n if student_out == expected_ans:\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed the only test\n final_report[self.total - 1] = f'FAILED; got {repr(student_out)} but expected {repr(expected_ans)}'\n\n # run callback function, if there is one\n if self.callback:\n if self.verbose:\n print('Running callback...')\n print('call back is:', self.callback)\n\n # once done, put the final report on the queue\n self.queue.put((self.student_username, self.name, f'{self.correct}/{self.total}', final_report))" ]
[ "0.6631348", "0.63206565", "0.63080895", "0.6257351", "0.6243088", "0.6196527", "0.6184294", "0.6101646", "0.60934335", "0.6092591", "0.60906154", "0.608936", "0.6052524", "0.6040191", "0.60248965", "0.6024804", "0.5999703", "0.59867626", "0.59778136", "0.5951736", "0.59259516", "0.5896043", "0.58836865", "0.5850132", "0.5834884", "0.581591", "0.5812924", "0.5812924", "0.5812924", "0.5793248" ]
0.63503134
1
Asks Noembed_ for the embedding HTML code for arbitrary URLs. Sites supported include Youtube, Vimeo, Twitter and many others. Successful embeds are always cached for 30 days. Failures are cached if ``cache_failures`` is ``True`` (the default). The
def oembed_html(url, cache_failures=True):
    # Thundering herd problem etc...
    key = 'oembed-url-%s' % md5(url.encode('utf-8')).hexdigest()
    html = cache.get(key)
    if html is not None:
        return html

    try:
        html = requests.get(
            'https://noembed.com/embed',
            params={
                'url': url,
                'nowrap': 'on',
                'maxwidth': 1200,
                'maxheight': 800,
            },
            timeout=2,
        ).json().get('html', '')
    except (requests.ConnectionError, requests.ReadTimeout):
        # Connection failed? Hopefully temporary, try again soon.
        if cache_failures:
            cache.set(key, '', timeout=60)
        return ''
    except (ValueError, requests.HTTPError):
        # Oof... HTTP error code, or no JSON? Try again tomorrow,
        # and we should really log this.
        if cache_failures:
            cache.set(key, '', timeout=86400)
        return ''
    else:
        # Perfect, cache for 30 days
        cache.set(key, html, timeout=30 * 86400)
        return html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _oembed_request(self, url):\n try:\n response = cache.get(url)\n if not response:\n resp = urllib.urlopen(url, timeout=5)\n response = json.loads(resp.read())\n cache.set('embed_'.format(url), response, 60 * 60 * 6) # 6hrs para que se actualize cada tanto\n return response\n except (urllib.URLError, ValueError, IndexError, TimeoutException, BadStatusLine, ssl.SSLError):\n return {}", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'https://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "async def fetch_link_embed(self, url: str):\n data = await self.http.get_embed_for_url(url)\n embed = Embed.from_unfurl_dict(data)\n return embed", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n if not self.embed_url:\n self.embed_url = 'https://www.youtube.com/embed/%s?wmode=transparent' % self.get_video_id()\n \n return self.embed_url", "def format_webembed(project_id, url=None):\n if not url:\n return \"Please provide a valid demo link.\"\n urltest = url.lower().strip()\n if urltest.startswith('<iframe '):\n # Allow IFRAMEs\n # TODO: add a setting\n return url\n elif urltest.endswith('.pdf'):\n # Embedded document\n url = url_for('project.render', project_id=project_id)\n # url = '/project/%d/render' % project_id\n elif urltest.startswith('https://query.wikidata.org/'):\n # Fix WikiData queries\n url = url.replace('https://query.wikidata.org/',\n 'https://query.wikidata.org/embed.html')\n elif urltest.startswith('https://youtu.be/'):\n # Fix YouTube mobile link\n url = url.replace('https://youtu.be/',\n 'https://www.youtube.com/embed/')\n url = url.replace('?t=', '?start=')\n elif urltest.startswith('https://www.youtube.com/watch?'):\n # Fix YouTube web link\n url = url.replace('https://www.youtube.com/watch?v=',\n 'https://www.youtube.com/embed/')\n url = url.replace('?t=', '?start=')\n # TODO: add more embeddables here\n return '<iframe src=\"%s\"></iframe>' % url", "def get_embed_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://cdn.livestream.com/embed/%s?layout=4&amp;clip=%s' % (self.get_username(), self.get_video_id())", "def get_embed_url(self):\n if not self._oembed:\n return ''\n \n if not self.original_url:\n return ''\n \n return 'https://w.soundcloud.com/player/?url=%s' % (self.original_url)", "def get_embed(url, params=None):\n if not url:\n return None\n \n embed_dict = []\n if type(url) == dict:\n \n # For those weird cases when og:video:type is declared but not og:video\n if not 'url' in url:\n return None\n \n #video_type = url.get('type')\n embed_dict = url\n url = url['url']\n \n url = url.strip()\n \n p = urlparse.urlparse(url)\n for embed_subclass in Embed.__subclasses__():\n if embed_subclass.check_url(parse_url=p):\n return embed_subclass(url, params=None)\n # if not was between subclass of EmbedResource\n if all(x in embed_dict for x in ['video', 'url']):\n \n # custom for new.livestream.com, clear iframe inside the url, and add w&h\n if 'iframe src=' in embed_dict['video']:\n embed_dict['video'] = EmbedFactory._livestream_clean(embed_dict['video'])\n \n embed_url = urlparse.urlparse(embed_dict['video'])\n for provider in WHITELIST_EMBED_PROVIDERS:\n if re.search(provider.get('name', None), embed_url.netloc):\n params = {\"embed_url\": embed_url}\n params['provider_name'] = embed_dict['site_name'] if 'site_name' in embed_dict else None\n params['thumbnail_url'] = embed_dict['image'] if 
'image' in embed_dict else None\n \n if provider.get('height', False) and 'height' in embed_dict:\n params['height'] = embed_dict.get('height', None)\n \n return GenericEmbed(embed_dict['url'], params)\n \n # This should be a \"generic\" case, mostly comming from og:video\n # Chose to disable it as it's very difficult to make it work for any generic case\n # Check http://soc.li/SHxUtad\n #return generic_video_dict(url, video_type)\n return None\n return None", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n return 'https://vine.co/v/%s/embed/simple' % (self.get_video_id())", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n if not self.video_id:\n return ''\n \n return 'http://embed.bambuser.com/broadcast/%s?context=b_simple&autoplay=0&chat=0' % (self.video_id)", "def get_embed_url(self):\n return self.embed_url", "def test_youtube_iframe():\n\n sample = \".. youtube:: YID\\n :height: 400\\n :width: 600\"\n html = get_html_from_rst(sample)\n assert_html_contains(\n html,\n \"iframe\",\n attributes={\n \"src\": (\n \"https://www.youtube-nocookie.com\"\n \"/embed/YID?rel=0&\"\n \"wmode=transparent\"\n ),\n \"height\": \"400\",\n \"width\": \"600\",\n \"frameborder\": \"0\",\n \"allowfullscreen\": \"\",\n \"allow\": \"encrypted-media\",\n },\n )", "async def ig(self, ctx, url):\n response = requests.get(url.replace(\"`\", \"\"), headers={\"Accept-Encoding\": \"utf-8\"})\n soup = BeautifulSoup(response.text, 'html.parser')\n script = soup.find_all('script')\n sources = []\n found_date = False\n post_date = None\n for i in range(len(script)):\n urls = re.findall('\"display_url\":\"(.*?)\"', script[i].text)\n if urls:\n sources = urls\n if not found_date:\n try:\n data = json.loads(script[i].text, encoding='utf-8')\n datestring = data.get('uploadDate')\n post_date = datetime.datetime.strptime(datestring, \"%Y-%m-%dT%H:%M:%S\")\n found_date = True\n except json.JSONDecodeError:\n pass\n sources = list(set(sources))\n\n date = re.findall('<script type=\"application/ld+json\">(.*?)</script>', response.text)\n print(date)\n\n if sources:\n content = discord.Embed(title=soup.title.string, url=url)\n if post_date is not None:\n content.timestamp = post_date\n for url in sources:\n content.set_image(url=url)\n await ctx.send(embed=content)\n self.logger.info(misolog.format_log(ctx, f\"Success\"))\n else:\n await ctx.send(\"Found nothing, sorry!\")\n self.logger.warning(misolog.format_log(ctx, f\"Found nothing\"))", "def get_embed_url(self):\n embed_url = None\n youtube_embed_url = 'https://www.youtube.com/embed/{}'\n vimeo_embed_url = 'https://player.vimeo.com/video/{}'\n\n # Get video ID from url.\n if re.match(YOUTUBE_URL_RE, self.url):\n embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))\n if re.match(VIMEO_URL_RE, self.url):\n embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))\n return embed_url", "def embed_images(self, html):\n if not self.SUPPORT_EMBED_IMAGES:\n raise RuntimeError('%r does not support embed_images' % type(self))\n\n return self.RE_IMG.sub(self._embed_image, html)", "def oembed(self, url):\r\n _url = '{0}/oembed'.format(self.get_url())\r\n return http.Request('GET', _url, {'url': url}), parsers.parse_json", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n return 'https://www.dailymotion.com/embed/video/%s' % self.get_video_id()", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n 
self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def oembed(self, url):\n _url = '{0}/oembed'.format(self.get_url())\n return http.Request('GET', _url, {'url': url}), parsers.parse_json", "def get_embed_url(self):\n raise NotImplementedError(\"Subclass must implement abstract method get_embed_url\")", "def shouldSkipUrl(self, url, data):\n return data.xpath('//img[contains(@src, \"content-error-missing\")]')", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'http://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "def blocked_malicious_url(self):\n return self._frame_response(\"{protocol}://{hostname}/whitelist/\" \\\n \"attackdomain={url}\".format(**self._formatdict()))", "def get_url():\r\n songs = []\r\n with open(FILE_CONTAINING_URLS) as f:\r\n for line in f:\r\n if not line.startswith(\"#\") and is_web_url(line):\r\n songs.append(line)\r\n\r\n # pick a random song and store it in song variable\r\n song = random.choice(songs)\r\n\r\n url_attempts = []\r\n\r\n for x in range(RETRY_COUNT):\r\n response = requests.get(song)\r\n # check if URL is valid and also make sure video is available\r\n if response.ok and video_is_available(song):\r\n return song\r\n # store failed URL\r\n url_attempts.append(song)\r\n # choose new random song\r\n song = random.choice(songs)\r\n\r\n print(\"Could not access video URLs. 
Please check network connection\")\r\n print(\"Tried the following URLs before failing:\")\r\n print(\"\\n\".join(url_attempts))\r\n exit(1)", "def test_content_invalid_url(self):\n from .views import WikiViews\n request = testing.DummyRequest()\n request.POST = {'wiki_url': INVALID_URL}\n wiki_view = WikiViews(request)\n info = wiki_view.content()\n self.assertTrue('error_message' in info)", "def _render_no_tracking(self, video_id):\n you_tube_url = (\n 'https://www.youtube.com/embed/%s'\n '?feature=player_embedded&amp;rel=0') % video_id\n iframe = cElementTree.XML(\"\"\"\n<div class=\"gcb-video-container\">\n <iframe class=\"youtube-player\" title=\"YouTube Video Player\"\n type=\"text/html\" frameborder=\"0\" allowfullscreen=\"allowfullscreen\">\n </iframe>\n</div>\"\"\")\n iframe[0].set('src', you_tube_url)\n return iframe", "def render_external(plugin, **kwargs):\n\n html = oembed_html(plugin.url)\n if 'youtube.com' in html:\n return mark_safe(\n '<div class=\"flex-video widescreen\">{}</div>'.format(html))\n if 'vimeo.com' in html:\n return mark_safe(\n '<div class=\"flex-video widescreen vimeo\">{}</div>'.format(html))\n return mark_safe(html)", "def get_embed_dict(self):\n if not self.get_url() or not self.get_embed_url():\n return None\n \n output = {\n \"url\": self.get_url(),\n \"embed_url\": self.get_embed_url(),\n \"provider_url\": self.get_provider_url(),\n \"provider_name\": self.get_provider_name(),\n \"thumbnail_url\": self.get_thumbnail_url(),\n \"type\": \"video\"\n }\n if self.get_height():\n output['iframe_height'] = self.get_height()\n if self.get_width():\n output['iframe_width'] = self.get_width()\n\n return output", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n return 'https://player.vimeo.com/video/%s' % self.get_video_id()" ]
[ "0.61671865", "0.557492", "0.55536354", "0.5506088", "0.5394031", "0.5333495", "0.5313941", "0.5254451", "0.5248169", "0.5236533", "0.5202616", "0.5186459", "0.51711", "0.51031286", "0.5083401", "0.50735337", "0.5036362", "0.5028727", "0.50113606", "0.50018126", "0.5001552", "0.4978021", "0.49541393", "0.49505806", "0.4928095", "0.49225742", "0.49133587", "0.49117622", "0.48816332", "0.48573667" ]
0.7094637
0
Gets or creates a Folder based on the list of folder names in hierarchical order (like breadcrumbs). get_or_create_folder(['root', 'subfolder', 'subsub folder']) creates the folders with correct parent relations and returns the 'subsub folder' instance.
def get_or_create_folder(self, folder_names):
    if not len(folder_names):
        return None
    current_parent = None
    for folder_name in folder_names:
        current_parent, created = Folder.objects.get_or_create(
            name=folder_name, parent=current_parent)
        if created:
            self.folder_created += 1
            if self.verbosity >= 2:
                print("folder_created #%s folder : %s -- created : %s" % (self.folder_created, current_parent, created))
    return current_parent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_folder_by_path(self, folder_path):\n\n current_parent_id = self._get_root_metadata()['id']\n\n path_folders = StoreTree.get_path_levels(folder_path)\n\n if path_folders[0] == '':\n return current_parent_id\n\n for folder_name in path_folders:\n current_parent_id = self.create_folder(current_parent_id, folder_name)\n\n return current_parent_id", "def create_folder(client, parent_folder_id, folder_name):\n\n try:\n subfolder = client.folder(parent_folder_id).create_subfolder(folder_name)\n print(f'Created subfolder with ID {subfolder.id}')\n\n except Exception as e:\n print(f\"An error occurred: {e}\")", "def create_sub_folder(self, name):\n params = {\n \"name\": name\n }\n pf = self.client._perform_json(\"POST\", \"/project-folders/%s/children\" % self.project_folder_id, params=params)\n return DSSProjectFolder(self.client, pf[\"id\"])", "def create_folder(id_parent, name):\n id_folder = incr_key_store('folders:counter')\n rpush_key_store('folders:list', {'id': id_folder, 'parent': id_parent, 'name': name})\n return id_folder", "def create_folder(folder_path: List[str]) -> str:\n drive = _drive_gen()\n return _create_or_find_folder(folder_path, drive)", "def get_save_root(self, sub_directories_list=None,\r\n create_folder_if_no_exist=True):\r\n if sub_directories_list is None:\r\n sub_directories_list = []\r\n elif not isinstance(sub_directories_list, list):\r\n # Assume string, normally I would check for a string but apparently this\r\n # is a bit quirky with Python 2 vs 3\r\n sub_directories_list = [sub_directories_list]\r\n\r\n if getattr(self, 'default_save_path', None) is not None:\r\n root_path = self.default_save_path\r\n if not os.path.isdir(root_path):\r\n raise Exception('Specified default save path does not exist')\r\n else:\r\n # http://stackoverflow.com/questions/50499/in-python-how-do-i-get-the-path-and-name-of-the-file-that-is-currently-executin/50905#50905\r\n package_path = os.path.dirname(\r\n os.path.abspath(inspect.getfile(inspect.currentframe())))\r\n\r\n # Go up to root, then down to specific save path\r\n root_path = os.path.split(package_path)[0]\r\n root_path = os.path.join(root_path, 'data')\r\n\r\n save_folder_path = os.path.join(root_path, *sub_directories_list)\r\n\r\n if create_folder_if_no_exist and not os.path.exists(save_folder_path):\r\n os.makedirs(save_folder_path)\r\n\r\n return save_folder_path", "def create_folder(path_folder, name_subfolder=None):\n if not name_subfolder:\n if not os.path.exists(path_folder):\n os.makedirs(path_folder)\n else:\n path_result_subolder = os.path.join(path_folder, name_subfolder)\n if not os.path.exists(path_result_subolder):\n os.makedirs(path_result_subolder)", "def createFolderStructure(self, rootfolder, filepath, parents):\n\n fp = filepath.resolve()\n folders = list(fp.parts)\n folders.reverse()\n\n ##remove file from list\n if fp.is_file():\n folders.remove(folders[0])\n\n for i in range(parents, len(folders)):\n folders.remove(folders[-1])\n folders.reverse()\n\n fparent = rootfolder\n\n if fparent:\n # iterate over file path and create the directory\n for fname in folders:\n f = vsdModels.Folder(\n name=fname,\n parentFolder=vsdModels.Folder(selfUrl=fparent.selfUrl)\n )\n fparent = f.create(self)\n return fparent\n else:\n print('Root folder does not exist', rootfolder)\n return None", "def postFolder(self, parent, name, check=True):\n\n folder = vsdModels.Folder()\n if parent is None:\n parent = self.getFolderByName('MyProjects', mode='exact')\n folder.parentFolder = 
vsdModels.APIBase(selfUrl=parent.selfUrl)\n folder.name = name\n\n exists = False\n\n if check:\n if parent.childFolders:\n for child in parent.childFolders:\n fold = self.getFolder(child.selfUrl)\n if fold is not None:\n if fold.name == name:\n print('folder {0} already exists, id: {1}'.format(name, fold.id))\n exists = True\n return fold\n else:\n print('unexpected error, folder exists but cannot be retrieved')\n exists = True\n\n # print(self.postRequest('folders', data = data))\n if not exists:\n data = folder.to_struct()\n # for name, field in folder:\n # if name not in data:\n # data[name] = None\n # print(data)\n res = self.postRequest('folders', data=data)\n folder.populate(**res)\n print('folder {0} created, has id {1}'.format(name, folder.id))\n assert folder.name == name\n return folder", "def _get_sub_folder_id(self, base_folder_id):\n find_sub_folder = find_my_folder_by_name_by_searching_files(self.sub_folder_name)\n if not find_sub_folder:\n folder_id = create_folder_in_drive(self.sub_folder_name, base_folder_id)\n else:\n folder_id = find_my_folder_by_name_by_searching_files(self.sub_folder_name)['id']\n\n return folder_id", "def _create(self, name: str, parent_id: str) -> CreateFolderResponseModel:\n endpoint: ApiEndpoint = self.api_endpoint_group.create\n request_obj: CreateFolderRequestModel = endpoint.load_request(\n name=name, parent_id=parent_id\n )\n response: CreateFolderResponseModel = endpoint.perform_request(\n http=self.auth.http, request_obj=request_obj\n )\n return response", "def get_folder(self):\n name = \"%s_%s\" % (self.PREFIX, self.FOLDER_NAME)\n folders = self.mw.get_folders()\n for fldr in folders:\n if fldr[\"name\"] == name:\n self.folder_id = fldr[\"folder_id\"]\n return\n self.folder_id = self.mw.create_folder(name)", "def for_subfolder(self, value):\n return Folder(self.directory, subdirectory=value)", "def test_create_parentless_folder_from_json(self):\n my_folder = folder.Folder.from_json(None, _FOLDER_JSON)\n self.assertEqual('987', my_folder.id)\n self.assertEqual('folder', my_folder.type)\n self.assertEqual('folders/987', my_folder.name)\n self.assertEqual('My folder', my_folder.display_name)\n self.assertEqual('folder/987/', my_folder.full_name)\n self.assertEqual(folder.FolderLifecycleState.ACTIVE,\n my_folder.lifecycle_state)", "def create_folder(self, foldername, parents=''):\r\n formatted_parents = (parents + '/').replace('/', '%2F')\r\n\r\n return self.yandex_requests.create_folder(\r\n foldername, formatted_parents)", "def recursively_build_path(self, path_parts, parent_folder_id, ids=None):\n if ids is None:\n ids = []\n if len(path_parts) == 0:\n return ids\n else:\n pf = self.gi.libraries.create_folder(self.library_id, path_parts[0], base_folder_id=parent_folder_id)\n ids.append(pf[0]['id'])\n return self.recursively_build_path(path_parts[1:], pf[0]['id'], ids=ids)", "def add(self, name):\n new_folder = Folder(self.context)\n\n def _add_sub_folder():\n new_folder_url = \"/\".join([self.serverRelativeUrl, name])\n new_folder.set_property(\"ServerRelativeUrl\", new_folder_url)\n qry = CreateEntityQuery(self.folders, new_folder, new_folder)\n self.context.add_query(qry)\n\n self.ensure_property(\"ServerRelativeUrl\", _add_sub_folder)\n return new_folder", "def create_folder_structure(self):\n # create the parent folder holding the project\n self.proj_folder.mkdir(exist_ok=False)\n # once we have setup the parent folder we can create the subfolder\n # structure\n create_subfolder = [self.aiida_subfolder, self.env_subfolder]\n if 
self.has_source():\n create_subfolder += [self.src_subfolder]\n for subfolder in create_subfolder:\n project_subfolder = self.proj_folder / subfolder\n project_subfolder.mkdir(exist_ok=False)", "def get_or_create_blob_folder(folder_id, bucket=default_bucket):\n folder = f\"posts/{str(folder_id)}\"\n blob = None\n try:\n blob = bucket.get_blob(folder)\n if not blob:\n blob = bucket.blob(folder)\n except Exception as e:\n app.logger.debug('---------------------- get or create folder Exception ----------------------')\n pprint(e)\n app.logger.debug('---------------------- get or create folder Exception End ------------------')\n raise e\n return blob", "def create_folder(folders_to_create=[]):\n for f in folders_to_create:\n if not os.path.exists(f):\n os.makedirs(f)", "def find(\n self,\n folder: t.Union[str, Folder],\n create: bool = FolderDefaults.create,\n echo: bool = FolderDefaults.echo,\n ) -> Folder:\n root: FoldersModel = self.get()\n return root.find(folder=folder, create=create, refresh=False, echo=echo)", "def createFolder(self, title, description=\"\", index=None):\n assert isinstance(index, int) or index is None\n\n try:\n if index is None:\n url = self.metaData.getLink(\"create-folder\")\n else:\n url = self.getFolders()[index].getLink(\"create-folder\")\n\n header = self._baseHeader.copy()\n header['Content-Type'] = \"application/vnd.huddle.data+json\"\n\n skeletonFolder = {\"title\" : title, \"description\" : description}\n jsonString = json.dumps(skeletonFolder)\n response = self._adapter.postRequest(url, header, jsonString)\n\n return Folder(self._client, response['Headers']['location'])\n except IndexError:\n print(\"the index: \" + index + \" does not exist in the list of folder numbers we have\")", "def make_folder(path,folder_names):\n for folder in folder_names:\n if not os.path.exists(os.path.join(path,folder)):\n os.makedirs(os.path.join(path,folder))", "def create_folder(self, foldername: str) -> int:\n raise NotImplementedError", "def get(self) -> FoldersModel:\n root: FoldersModel = self._get()\n return root", "def create(path, overwrite=False):\n path = normpath(path)\n try:\n remote = get_remote(path)\n except ValueError: # Nothing exists at path, nothing to worry about.\n pass\n else:\n if isinstance(remote, RemoteFolder):\n pdbox.info(\"%s already exists\" % remote.uri)\n return remote\n elif not overwrite:\n raise ValueError(\"%s already exists\" % remote.uri)\n\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_create_folder_v2, path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n\n pdbox.info(\"Created new folder %s\" % dbx_uri(path))\n\n if not pdbox._args.get(\"dryrun\"): # Return the newly created folder.\n return RemoteFolder(None, meta=result.metadata)", "def create_hierarchy(parent_path, child_path):\n if parent_path:\n parent_skill_id = database_controller.get_skill(parent_path).id\n child_skill_id = database_controller.get_skill(child_path).id\n new_hierarchy = Hierarchy(parent_skill_id=parent_skill_id, child_skill_id=child_skill_id)\n db.session.add(new_hierarchy)\n db.session.commit()\n else:\n child_skill_id = database_controller.get_skill(child_path).id\n new_hierarchy = Hierarchy(child_skill_id=child_skill_id)\n db.session.add(new_hierarchy)\n db.session.commit()\n ''' \n new_hierarchy = Hierarchy()\n if parent_path:\n parent_skill = database_controller.get_skill(parent_path)\n new_hierarchy.parent_skill_assoc = parent_skill\n child_skill = database_controller.get_skill(child_path)\n 
new_hierarchy.child_skill_assoc = child_skill\n db.session.add(new_hierarchy)\n db.session.commit()\n '''", "async def new_folder(name):\n res = await joplin.create_folder(folder=name)\n return res.json()['id']", "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def createFolders(self, *args):\n for folder in args:\n mkdir(folder)" ]
[ "0.6284484", "0.5968514", "0.591213", "0.59022653", "0.5900601", "0.58238125", "0.5787846", "0.56719345", "0.5648211", "0.55549943", "0.5534397", "0.5528245", "0.55195063", "0.5439631", "0.5409075", "0.52300525", "0.5180839", "0.5164742", "0.51623046", "0.51155657", "0.51071566", "0.5083579", "0.49614227", "0.49461153", "0.49009287", "0.4880786", "0.48640564", "0.48444223", "0.4843404", "0.4837591" ]
0.7129039
0
Generate a dictionary of random product IDs and their prices
def generateProducts(self): # Creates items in each category for i in range(self.num_of_items): self.ID_DICT[i+self.num_of_items] = random.randint(1, 10) self.ID_DICT[i+self.num_of_items*2] = random.randint(1, 10) self.ID_DICT[i+self.num_of_items*3] = random.randint(1, 10) self.ID_DICT[i+self.num_of_items*4] = random.randint(1, 10) self.ID_DICT[i+self.num_of_items*5] = random.randint(1, 10) self.ID_DICT[i+self.num_of_items*6] = random.randint(1, 10) # Sort for easy selection sorted(self.ID_DICT) for product in self.ID_DICT.keys(): temp_int = self.ID_DICT[product] self.c.execute("INSERT INTO Products (ProductID, Price) VALUES (?, ?)", (product, self.ID_DICT[product])) self.conn.commit() if self.print_items: print("\nAll items in store:") print(self.ID_DICT) print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initialize_products(self, products: List) -> Dict[str, int]:\n\n product_request = urllib.request.Request(url=URL_PRODUCTS, headers={'User-Agent': URL_USER_AGENT})\n product_response = urllib.request.urlopen(product_request)\n all_products = json.load(product_response)\n\n product_details = {}\n\n for product in products:\n for cbpro_product in all_products:\n if cbpro_product[\"id\"] == product:\n quote_increment = float(cbpro_product[\"quote_increment\"])\n num_decimal_places = int(math.log10(1 / quote_increment))\n product_details[product] = num_decimal_places\n logging.debug(\n \"Retrieved quote increment for {}: {} = {} decimal places\".\n format(product, quote_increment, num_decimal_places))\n\n return product_details", "def get_products_dict(products):\n # lang = get_language()[:2]\n lang = ''\n products_dict = {}\n try:\n if products and products[0].get('source') == 'greedy':\n for product in products:\n key = product['name']\n products_dict[key] = products_dict.get(key, {})\n products_dict[key].setdefault('products', []).append(key)\n products_dict[key]['price'] = products_dict[key].get('price', 0) + product['net_price']\n else:\n product_objs = list(Product.objects.using('slave').in_bulk([p['product_id'] for p in products]).values())\n bundled_products = []\n for product in product_objs:\n for bundled_product in product.bundled.all():\n bundled_product.price = 0\n bundled_products.append(bundled_product)\n product_objs.extend(bundled_products)\n for product in product_objs:\n key = getattr(product.parent, 'name_%s' % lang)\n products_dict[key] = products_dict.get(key, {\n 'expire_in': product.expire_in,\n 'never_expire': product.never_expire\n })\n products_dict[key].setdefault('products', []).append(mark_safe(product.name))\n products_dict[key]['price'] = products_dict[key].get('price', 0) + product.price\n # Convert it to a format which is easy to handle in email templates\n products_dict = [{\n 'title': key,\n 'body': value,\n } for key, value in products_dict.items()]\n except (ValueError, KeyError, AttributeError):\n products_dict = list({'title': p['name'], 'body': {'expire_in': None, 'never_expire': None}} for p in products)\n\n return products_dict", "def get_prices_dict(name,products,sales):\r\n return {x:(1-sales[name])*products[x] for x in products}", "def get_products(self) -> dict:\n\t\tproducts = dict()\n\n\t\tdb = Database()\n\t\tdb.create_connection(self._file_path)\n\t\trows = db.get_products()\n\t\tdb.close_connection()\n\n\t\tfor row in rows:\n\t\t\tif row[0] not in products:\n\t\t\t\ttry:\n\t\t\t\t\tproducts[row[0]] = Product(row[0], row[1], row[2], row[3]) # code, price, lastupdate, currency\n\t\t\t\texcept Exception as e: \n\t\t\t\t\t# IF the database was not correct parsed, the item will be discarted, \n\t\t\t\t\t# the event will be logged in the log file and the program will continue\n\t\t\t\t\tlogging.error(str(datetime.now())+': ' + e)\n\t\t\t\t\tcontinue\n\n\t\treturn products", "def generate_products():\n # initialize list of noun and adj\n num_products = 30\n products = [0] * num_products\n prices = [0] * num_products\n weights = [0] * num_products\n flammabilities = [0] * num_products\n\n # initlize random word object\n random = RandomWords()\n\n adj = [random.get_random_word(includePartOfSpeech=\"adjective\")\n for product in products]\n noun = [random.get_random_word(includePartOfSpeech=\"noun\")\n for product in products]\n products = [noun + \" \" + adj for noun, adj in zip(adj, noun)]\n\n prices = [random.randint(5, 100) for price in 
prices]\n weights = [random.randint(5, 100) for weight in weights]\n flammabilities = [random.randint(0.0, 2.5)\n for flammability in flammabilities]\n\n return products, prices, weights, flammabilities", "def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample", "def list_all_products(*args):\n logger.info(f\"Perparing dict of all products...\")\n all_products_dict = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n all_products = products.find({})\n for product in all_products:\n product_id = product[\"product_id\"]\n del product[\"_id\"]\n del product[\"product_id\"]\n all_products_dict[product_id] = product\n return all_products_dict", "def generate_random_data() -> dict:\n data = {\n \"_pl\": {\n \"userId\": uuid.uuid4().__str__(),\n \"sensorValue\": random.random(),\n \"sensorId\": \"\".join(random.choices(string.ascii_lowercase + string.digits, k=5))\n + \"-\"\n + \"\".join(random.choices(string.ascii_lowercase + string.digits, k=10))\n + \"-\"\n + \"\".join(random.choices(string.ascii_lowercase + string.digits, k=10))\n }\n }\n return data", "def update_random_product_price(product_id, data):\n return woo_request_helper().put_details(wc_endpoint='products/{}'.format(product_id), params=data)", "def generate_products(num_products=30):\n products = []\n for item in range(0, num_products):\n gen_name = str(random.choice(ADJECTIVES) + \" \" + random.choice(NOUNS))\n price = random.uniform(5, 100)\n weight = random.uniform(5, 100)\n flammability = random.uniform(0.0, 2.5)\n products.append(Product(name=gen_name,\n price=price, weight=weight,\n flammability=flammability))\n return products", "def generate_products(n=30, price_range=(5, 10), weight_range=(5, 100)):\n products = []\n for i in range(1, n + 1):\n name = random.choice(ADJECTIVES) + ' ' + random.choice(NOUNS)\n price = random.randrange(price_range[0], price_range[1] + 1)\n weight = random.randrange(weight_range[0], weight_range[1] + 1)\n flammability = random.uniform(0.0, 2.5)\n product = Product(name, price, weight, flammability)\n products.append(product)\n return products", "def createFakePriceDatabase(routes):\n\n\ttransformedArray = transformArray(routes)\n\tpriceDatabase = {key:[int(round(uniform(20, 180))) for _ in range(364)] for key in transformedArray}\n\n\treturn priceDatabase", "def poster_list_products(products):\r\n print('\\n Choisir un produit : ')\r\n dict_product = {}\r\n index = 1\r\n\r\n for i in products:\r\n poster_products = cl.Food(i, index)\r\n dict_product[poster_products.index] = poster_products.name\r\n print(index, \" : \", poster_products.name)\r\n index += 1\r\n return dict_product", "def generate_products(num_products=30):\r\n products = []\r\n for i in range(num_products):\r\n name = sample(ADJECTIVES, 1)[0] + ' ' + sample(NOUNS, 1)[0]\r\n price = randint(5, 100)\r\n weight = randint(5, 100)\r\n flammability = uniform(0.0, 2.5)\r\n products.append(Product(name, price=price, weight=weight,\r\n flammability=flammability))\r\n return products", "def __init__(self, data):\n self.products = dict()\n for item in data:\n style_number = item[\"Style\"]\n\n if style_number not in self.products:\n product = {\"price\": item[\"price\"]}\n self.products[style_number] = product", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = 
eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def get_price():\n return uniform(1.0, 350.0)", "def poster_product_list(product):\r\n print('\\n Séléctionner un product : ')\r\n dict_produit = {}\r\n index = 1\r\n for i in product:\r\n poster_product = cl.Food(i, index)\r\n dict_produit[poster_product.index] = poster_product.name\r\n print(index, \" : \", poster_product.name)\r\n index += 1\r\n return dict_produit", "def get_data(self):\n return {\n self.PRODUCT_RANGE_ID: self.product_id,\n self.SALES_CHANNEL_ID: self.SALES_CHANNEL_ID_VALUE,\n }", "def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})", "def show_available_products():\n products = DATABASE['product'].find({'quantity_available': {'$ne':'0'}})\n products_dict = {prod['product_id']:\n {'description': prod['description'],\n 'product_type': prod['product_type'],\n 'quantity_available': int(prod['quantity_available'])}\n for prod in products}\n return products_dict", "def get_price_list(self):\n # Fetch the resource info then get a copy of it\n self._res_man.collect_resource_info()\n res_list = self._res_man.get_resource_info()\n # Pack the price response!\n price_response = PriceResponse()\n for res in res_list:\n price = res.post_price()\n res_type = res.get_res_type()\n if res_type == ResourceType.LINK:\n src_zone_id, dst_zone_id = res.get_src_and_dst()\n price_response.add_link_price(\n src_zone_id,\n dst_zone_id,\n price,\n res.get_max_valuation()\n )\n else:\n price_response.add_resource_price(\n res.get_zone_id(),\n res_type,\n price,\n res.get_max_valuation()\n )\n # Save prices for later, if needed\n if self._use_price_token:\n # Generate token\n self._num_req += 1\n token = hash(str(self._num_req))\n price_response.set_price_token(token)\n # Get mapping\n price_mapping = self._res_man.get_res_to_price_mapping()\n # Get priority\n pri = time() + self._token_duration\n with self._history_lock:\n self._hist_q.put((pri, token))\n self._price_history[token] = price_mapping\n logger.debug(\n f'Saved prices with token {token} '\n f'for {self._token_duration} seconds'\n )\n return price_response", "def customers_renting_product(product_id):\n logger.info(f\"Perparing rental dict for product_id: {product_id}...\")\n users_renting_product = []\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n\n rentals = mdb[\"rental\"]\n customers = mdb[\"customers\"]\n query = {\"product_id\": product_id}\n\n # First we get a list of customers for the specified product_id\n for rental in rentals.find(query):\n # Now we get customer details from customers via user_id\n query = {\"user_id\": rental[\"user_id\"]}\n logger.info(rental[\"user_id\"])\n\n for customer in customers.find(query):\n logger.info(customer)\n del customer[\"_id\"]\n users_renting_product.append(customer)\n\n return users_renting_product", "def __get_deal_price(self):\n return self.create_random_decimal(min=1, max=100000)", "def 
get_products_statistics(loop, products):\n description_placeholder = config[\"placeholders\"][\"description\"]\n img_url_placeholder = config[\"placeholders\"][\"img_url\"]\n\n # collect offers for all products asynchronously\n futures = [get_offers_async(product[\"productId\"]) for product in products]\n all_offers = loop.run_until_complete(asyncio.gather(*futures))\n\n for product, offers in zip(products, all_offers):\n product[\"normalized_title\"] = clean_string(product[\"title\"])\n product[\"min_price\"] = sys.maxsize\n product[\"max_price\"] = 0\n product[\"description\"] = description_placeholder\n product[\"img_url\"] = img_url_placeholder\n\n for offer in offers:\n product[\"min_price\"] = min(product[\"min_price\"], offer[\"price\"])\n product[\"max_price\"] = max(product[\"max_price\"], offer[\"price\"])\n\n if product[\"description\"] == description_placeholder and offer.get(\"description\"):\n product[\"description\"] = offer[\"description\"]\n\n if product[\"img_url\"] == img_url_placeholder and offer.get(\"img_url\"):\n product[\"img_url\"] = offer[\"img_url\"]\n\n return products", "def get_products():\n products = db.session.query(Product).all()\n product_details = {}\n\n for product in products:\n product_details[product.product_id] = product.name\n\n return jsonify(product_details)", "def create_products(num):\n return [''.join(random.choices('ABCDEFG123', k=3)) for _ in range(num)]", "def _all_products(q_1: Q, q_2: Q) -> Dict:\n\n all_dict = _commuting_products(q_1, q_2)\n all_dict.update(_anti_commuting_products(q_1, q_2))\n\n return all_dict", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n \n data = response.css('script[type~=\"application/ld+json\"]::text').get()\n # Removes indent\n data = re.sub(r'[\\n\\t]', '', data)\n data = eval(data)\n \n item['product_name'] = data['name']\n item['price'] = data['offers']['price']\n item['currency'] = data['offers']['priceCurrency']\n item['product_id'] = data['productID']\n \n # seed number\n item['raw_string'] = response.css('.seed-number::text').get().strip()\n return item", "def p():\n args = {'product_id' : 1, 'sku': 'abc', 'upc': 'def',\n 'name' : 'hello', 'description' : 'xfsef', \n 'category1' : 'sdfds', 'category2' : 'dsfssaa',\n 'storage' : 'afas', 'keywords' : '32423ssdf', \n 'quantity' : 3240, 'price': 23234, 'item_weight' : 23423,\n 'item_weight_unit' : 'aefewa', 'item_volume' : 12.3,\n 'item_volume_unit' : 'sfds4', 'expiry_date': '02/02/20', \n 'items_per_case' : 2343, \n 'case_wt' : 324234, 'case_wt_unit' : 'safa', 'case_dim' : '3ags',\n 'case_dim_unit' : 'sdfs', 'photo1' : 'sdfsf34', 'photo2' : 'sdfgs',\n 'photo3' : 'sdgfsdrf', 'created' : '2020-01-02 34:23:34', \n 'last_updated' : '2024-34-34 34.12.34' }\n return Product(**args)" ]
[ "0.6971659", "0.6540291", "0.6502809", "0.62412894", "0.6076976", "0.6033557", "0.59931135", "0.59761584", "0.5946876", "0.59340703", "0.59294564", "0.5865452", "0.5837422", "0.5778919", "0.5731991", "0.5731828", "0.5689985", "0.5683067", "0.5670007", "0.5662804", "0.5661343", "0.563683", "0.560308", "0.5598898", "0.55949", "0.55819756", "0.5579578", "0.5548886", "0.5547325", "0.5536292" ]
0.68286663
1
Given floats used for weighted choices, random purchases are made for a customer
def generatePurchases(self, num_of_purchases, food, medical, electronics, outdoors, clothing, beauty, customer): # Empty purchases self.customer_purchases = [] # Customer is *likely* to buy from some categories, but anything can happen weighted_categories = [('Food', food), ('Medical', medical), ('Electronics', electronics), ('Outdoors', outdoors), ('Clothing', clothing), ('Beauty', beauty)] randomCategory = [val for val, cnt in weighted_categories for i in range(cnt)] # Buy items for i in range(num_of_purchases): choice = random.choice(randomCategory) if choice == 'Food': tempIndex = random.randint(self.num_of_items, self.num_of_items*2-1) self.customer_purchases.append(tempIndex) elif choice == 'Medical': tempIndex = random.randint(self.num_of_items*2, self.num_of_items*3-1) self.customer_purchases.append(tempIndex) elif choice == 'Electronics': tempIndex = random.randint(3*self.num_of_items, 4*self.num_of_items-1) self.customer_purchases.append(tempIndex) elif choice == 'Outdoors': tempIndex = random.randint(4*self.num_of_items, 5*self.num_of_items-1) self.customer_purchases.append(tempIndex) elif choice == 'Clothing': tempIndex = random.randint(5*self.num_of_items, 6*self.num_of_items-1) self.customer_purchases.append(tempIndex) elif choice == 'Beauty': tempIndex = random.randint(6*self.num_of_items, 7*self.num_of_items-1) self.customer_purchases.append(tempIndex) if self.print_customers: print(self.customer_purchases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateCustomers(self):\r\n\r\n # Counters\r\n shoppers = 0\r\n models = 0\r\n oldl = 0\r\n oldf = 0\r\n doctor = 0\r\n nudist = 0\r\n hippie = 0\r\n nerd = 0\r\n\r\n for i in range(self.num_of_customers):\r\n\r\n # With these weights, our store has plenty of youngs and olds, but few mids\r\n # Most grocery shoppers come in the evening\r\n # Young people have equal distribution between morning and evening\r\n # etc\r\n age1 = random.randint(18, 28)\r\n age2 = random.randint(28, 50)\r\n age3 = random.randint(50, 85)\r\n weighted_ages = [(age1, 10), (age2, 2), (age3, 15)]\r\n randomAge = [val for val, cnt in weighted_ages for a in range(cnt)]\r\n\r\n hour1 = random.randint(8, 13)\r\n hour2 = random.randint(13, 18)\r\n hour3 = random.randint(18, 22)\r\n weighted_hours = [(hour1, 10), (hour2, 3), (hour3, 20)]\r\n randomHour = [val for val, cnt in weighted_hours for b in range(cnt)]\r\n\r\n age = random.choice(randomAge)\r\n hour = random.choice(randomHour)\r\n gender = random.choice(['M', 'M', 'M', 'M', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F'])\r\n\r\n # Base chances, 100 total\r\n gs, sm, hp, ol, nrd, of, sd, nud = 20, 5, 5, 5, 5, 5, 10, 10\r\n\r\n customerID = random.randint(0, self.num_of_customers*2)\r\n while customerID in self.all_customers:\r\n customerID = random.randint(0, self.num_of_customers*2)\r\n\r\n # Weights\r\n if 18 < age < 22:\r\n if gender == 'M':\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, hp = 2, 2, 35, 20\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd, hp, gs = 2, 2, 15, 30, 5\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs = 2, 2, 50\r\n\r\n elif gender == 'F':\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd = 5, 35, 15\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp = 5, 30, 30\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs, = 5, 25, 50\r\n\r\n elif gender == 'M' and 22 < age < 29:\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, hp = 5, 5, 35, 25\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd, hp = 5, 5, 35, 40\r\n elif 18 <= hour <= 22:\r\n ol, sm, nrd, hp, gs = 5, 5, 20, 20, 50\r\n\r\n elif gender == 'M' and 29 < age < 50:\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, gs = 5, 5, 40, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd = 5, 5, 30\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs = 5, 5, 70\r\n\r\n elif gender == 'M' and age > 50:\r\n if 8 <= hour <= 12:\r\n ol, sm, gs, of, hp = 5, 5, 30, 60, 20\r\n elif 13 <= hour <= 17:\r\n ol, sm, gs, of, hp = 5, 5, 15, 70, 20\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs, of, hp = 5, 5, 50, 25, 20\r\n\r\n elif gender == 'F' and 22 < age < 35:\r\n if 8 <= hour <= 12:\r\n ol, sm, hp, gs = 5, 30, 30, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp, gs = 5, 30, 30, 15\r\n elif 18 <= hour <= 22:\r\n ol, sm, hp, gs = 5, 15, 25, 60\r\n\r\n elif gender == 'F' and 35 < age < 55:\r\n if 8 <= hour <= 12:\r\n ol, sm, hp, gs = 5, 5, 5, 40\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp, gs = 25, 5, 5, 25\r\n elif 18 <= hour <= 22:\r\n ol, sm, hp, gs = 30, 5, 5, 40\r\n\r\n elif gender == 'F' and age > 55:\r\n if 8 <= hour <= 12:\r\n ol, sm, of, gs = 20, 5, 15, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, of, gs = 60, 5, 30, 15\r\n elif 18 <= hour <= 22:\r\n ol, sm, of, gs = 40, 5, 20, 40\r\n\r\n weighted_choices = [('Grocery Shopper', gs), ('Supermodel', sm), ('Hippie', hp), ('Old Lady', ol), ('Nerd', nrd), ('Self Doctor', sd), ('Nudist', nud), ('Old Fart', of)]\r\n randomType = [val for val, cnt in weighted_choices for n in range(cnt)]\r\n\r\n customer = random.choice(randomType)\r\n\r\n if customer == 'Grocery Shopper':\r\n shoppers += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance 
= 18\r\n medicalChance = 3\r\n electronicsChance = 1\r\n outdoorsChance = 1\r\n clothingChance = 1\r\n beautyChance = 2\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Supermodel':\r\n models += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 0\r\n medicalChance = 5\r\n electronicsChance = 0\r\n outdoorsChance = 0\r\n clothingChance = 10\r\n beautyChance = 13\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Hippie':\r\n hippie += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 6\r\n medicalChance = 2\r\n electronicsChance = 1\r\n outdoorsChance = 14\r\n clothingChance = 7\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Old Lady':\r\n oldl += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 6\r\n medicalChance = 8\r\n electronicsChance = 0\r\n outdoorsChance = 0\r\n clothingChance = 3\r\n beautyChance = 10\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Nerd':\r\n nerd += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 4\r\n medicalChance = 3\r\n electronicsChance = 14\r\n outdoorsChance = 0\r\n clothingChance = 2\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Self Doctor':\r\n doctor += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 5\r\n medicalChance = 32\r\n electronicsChance = 4\r\n outdoorsChance = 1\r\n clothingChance = 2\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Nudist':\r\n nudist += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 10\r\n medicalChance = 5\r\n electronicsChance = 0\r\n outdoorsChance = 14\r\n clothingChance = 0\r\n beautyChance = 0\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Old Fart':\r\n oldf += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 10\r\n medicalChance = 18\r\n electronicsChance = 5\r\n outdoorsChance = 3\r\n clothingChance = 3\r\n beautyChance = 0\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, 
customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n itemsBought = (\", \".join(repr(e) for e in self.customer_purchases))\r\n self.c.execute(\"INSERT INTO Customer (CustomerID, Hour, Age, Gender, Items) VALUES (?, ?, ?, ?, ?)\", (customerID, hour, age, gender, itemsBought))\r\n self.conn.commit()\r\n\r\n if self.print_counters:\r\n print(\"\\nShoppers:\", shoppers)\r\n print(\"Models:\", models)\r\n print(\"Old Ladies:\", oldl)\r\n print(\"Old Farts:\", oldf)\r\n print(\"Self doctors:\", doctor)\r\n print(\"Nerds:\", nerd)\r\n print(\"Hippies:\", hippie)\r\n print(\"Nudists:\", nudist)\r\n\r\n if self.print_customers:\r\n print(\"\\nRaw Customer Data: \")\r\n print(self.all_customers)", "def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample", "def weighted_choice(weighted_items, num_items=1):\n total = 0\n cume_list = []\n\n for item, weight in weighted_items.items():\n total += weight\n cume_list.append([item, total])\n\n for pair in cume_list:\n pair[1] /= total\n\n items = []\n\n for _ in range(num_items):\n rand = random()\n\n for item, val in cume_list:\n if rand <= val:\n items.append(item)\n break\n\n assert num_items == len(items), (weighted_items, items)\n\n if num_items == 1:\n return items[0]\n\n return items", "def weightedrandomchoice(items): # {{{2\n total = 0\n items.sort(reverse=True, key=lambda x:x[0])\n for item in items:\n total += item[0]\n threshold = random.uniform(0, 0.6) * total\n for item in items:\n threshold -= item[0]\n if threshold <= 0:\n return item[1]", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item", "def weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i", "def generate_customer(self):\n customer_rates = np.random.multivariate_normal(\n mean=self.behave_means, cov=self.behave_cov\n )\n customer_rates = customer_rates.clip(\n min=self.min_rate\n ) # clip : no negative rates!\n new_customer = Customer(customer_rates)\n # print(customer_rates)\n return new_customer", "def weighted_choice(*values, **kwargs):\n key = kwargs.get('key', lambda x: 1.0)\n if len(values) == 1:\n values = values[0]\n if len(values) == 0:\n raise TypeError('weighted_choice expected 1 arguments, got 0')\n\n weights = [key(v) for v in values]\n s = sum(weights)\n r = random.random() * s\n for v,w in zip(values, weights):\n s -= w\n if r > s:\n return v\n return values[-1]", "def _random_customer(cust_dtls) -> tuple:\n return choices(cust_dtls)[0]", "def weightedChoice(weights, objects, apply_softmax=False, alpha=None):\n if apply_softmax: weights = softmax(weights)\n if alpha: weights = 
normalize([w**alpha for w in weights])\n cs = np.cumsum(weights) #An array of the weights, cumulatively summed.\n idx = sum(cs < np.random.rand()) #Find the index of the first weight over a random value.\n idx = min(idx, len(objects)-1)\n return objects[idx]", "def random_choice(options, weights): \n r = random.random()\n for i, c in enumerate(cumsum(weights)):\n if r <= c:\n return options[i]", "def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink", "def select_item(items, weights, k):\n x = random.choices(items, weights=weights, k=k)\n return x", "def _weighted_choice(self, lst):\n \n total_weight = reduce(lambda x,y:x+y, [tup[1] for tup in lst])\n n = random.uniform(0, total_weight)\n for item, weight in lst:\n if n < weight:\n break\n n = n - weight\n return item", "def weighted_choice(choices):\n return choice(sum(([value]*weight for value, weight in choices), []))", "def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]", "def weighted_random_item(items, weight):\n if not items:\n return None\n\n weight_sum = sum(weight(item) for item in items)\n if weight_sum <= 0:\n return None\n\n choice = random.random() * weight_sum\n for item in items:\n choice -= weight(item)\n if choice < 0:\n return item, weight(item) / weight_sum\n return items[-1], -1 # floating-point rounding error", "def weighted_choice(options: np.ndarray, weights: np.ndarray) -> Union[int,float]:\n assert len(options) == len(weights) != 0\n total = np.sum(weights)\n rand = np.random.rand() * total\n for i in range(len(options)):\n option = options[i]\n weight = weights[i]\n if weight < rand:\n rand -= weight\n else:\n break\n return option", "def sample_from(self, weights):\n total = sum(weights)\n rnd = total * random.random() # uniform between 0 and total\n for i, w in enumerate(weights):\n rnd -= w # return the smallest i such that\n if rnd <= 0:\n return i # weights[0] + ... + weights[i] >= rnd", "def weighted_bright():\n return np.random.choice(\n [17, 18, 19, 20, 21] * 1 +\n [22, 23, 24, 25, 26] * 2 +\n [27, 28, 29, 30, 31] * 3\n )", "def selection_wheel(self, weighted_population):\n weight_total = sum((item[1] for item in weighted_population))\n n = random.uniform(0, weight_total)\n for item, weight in weighted_population:\n if n < weight:\n return item\n n = n - weight\n return item", "def buy(self, price):\n return np.random.binomial(1, self.conversion_rate(price))", "def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. 
Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def weighted_choice(choices, weight):\n\t# requirements = random\n\tweights = []\n\t# get weight values for each of the choices\n\tfor choice in choices:\n\t\tchoice_weight = weight(choice)\n\t\tif not (isinstance(choice_weight, int) and choice_weight > 0):\n\t\t\traise TypeError('weight results must be positive integers')\n\t\tweights.append(choice_weight)\n\n\t# make a selection within the acceptable range\n\tselection = random.randint(0, sum(weights) - 1)\n\n\t# find and return the corresponding choice\n\tfor idx, choice in enumerate(choices):\n\t\tif selection < sum(weights[:idx + 1]):\n\t\t\treturn choice\n\traise RuntimeError('no selection could be made')", "def weighted_choice(self, probabilities, key):\n\n total = sum(x[0] for x in probabilities)\n choice = total * self._random(key)\n\n for probability, option in probabilities:\n choice -= probability\n if choice <= 0:\n return option", "def generateSupplies(self):\n typePart = ['wrench','resistor','bulb','mushroom','coin']\n chosenPart = []\n for i in range(3):\n randomPart = choice(typePart)\n chosenPart.append(randomPart)\n typePart.remove(randomPart)\n for part in chosenPart:\n amount = randint(1,3)\n self._supplies.append(Node(part,amount))", "def result(request):\n def weighted_choice(choices):\n total = sum(c.number_requests for c in choices)\n r = random.uniform(0, total)\n upto = 0\n for c in choices:\n if upto + c.number_requests >= r:\n return c\n upto += c.number_requests\n assert False, \"Shouldn't get here\"\n restaurants = Restaurant.objects.exclude(number_requests=0)\n lenr = len(restaurants)\n if not restaurants:\n return HttpResponse(\"No restaurants have been requested\")\n # select random restaurant from the database\n restaurant = weighted_choice(restaurants)\n name = restaurant.name\n # set number_requests to 0\n restaurant.number_requests = 0\n restaurant.save()\n\n # render it to the screen\n return render(request, \"foodsite/result.html\", {\"restaurant\": name})" ]
[ "0.68680906", "0.67529637", "0.65332425", "0.6512736", "0.64413524", "0.6347539", "0.63373226", "0.6335294", "0.627076", "0.62083924", "0.61907333", "0.6163172", "0.6128276", "0.6093737", "0.6055325", "0.6027389", "0.6019666", "0.6015048", "0.60027325", "0.5991202", "0.5886684", "0.5885721", "0.5883686", "0.5876767", "0.5859793", "0.5852244", "0.5834411", "0.5781739", "0.5769738", "0.5765691" ]
0.7328029
0
Generate employees, each with a name, clock in, clock out, and wage
def generateEmployees(self): # Name maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen'] femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse'] for i in range(self.num_of_employees): # Clock in an hour before opening, 6 hours after, or 12 hours after clockIn = random.choice([7, 13, 19]) # Clock out after 5 hours, 10 hours, or 15 hours clockOut = random.choice([13, 19, 23]) while clockOut <= clockIn: clockOut = random.choice([13, 19, 23]) # Hourly wage wage = random.choice([8, 9, 10, 12, 20]) gender = random.choice(['M', 'F']) if gender == 'M': name = random.choice(maleNames) else: name = random.choice(femaleNames) self.c.execute("INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)", (name, clockIn, clockOut, wage)) self.conn.commit() if self.print_employees: print("\nName:", name) print("Clock in:", clockIn) print("Clock out:", clockOut) print("Wage:", wage)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_emp_man_hours(self):\n start = timezone.make_aware(dt.datetime(2016, 6, 3, 6, 30))\n stop = timezone.make_aware(dt.datetime(2016, 6, 3, 10, 30))\n emp_hours = 0\n\n expected_emp_hours = 20.95\n\n # getting employee objects that are clocked in\n clocked_in_emp = get_clocked_in(start)\n emp_that_left = get_emp_who_left_during_shift(start, stop)\n emp_that_breaked = get_emp_who_left_on_break(start, stop)\n\n # testing return of number of hours\n for employee in clocked_in_emp:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_left:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n for employee in emp_that_breaked:\n print(\"EMP= \", employee.PRSN_NBR_TXT)\n emp_hour = get_emp_man_hours(employee, start, stop)\n print(\"EMP HOUR= \", emp_hour)\n emp_hours += emp_hour\n\n self.assertAlmostEqual(emp_hours, expected_emp_hours)", "def employees_earning(table):\n\n product_index = 1\n employee_id_index = 2\n amount_sold_index = 4\n\n person_id_index = 0\n person_name_index = 1\n\n game_index = 0\n price_index = 3\n\n store_table = store.get_table()\n store.check_table(store_table)\n hr_table = hr.get_table('model/hr/persons.csv')\n money_earned = {}\n for person in hr_table:\n person_id = person[person_id_index]\n person_name = person[person_name_index]\n money_earned[person_name] = 0\n for record in table:\n product_id = record[product_index]\n employee_id = record[employee_id_index]\n amount_sold = int(record[amount_sold_index])\n if person_id == employee_id:\n for game in store_table:\n game_id = game[game_index]\n if game_id == product_id:\n game_price = int(game[price_index])\n money_earned[person_name] += int(amount_sold * game_price)\n return money_earned", "def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def employee_data(self):\n self.paymethod()\n self.classification()\n for i in self.emp_id:\n if self.clsf[i] == \"Salaried\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][7]]\n elif self.clsf[i] == \"Hourly\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][8],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][8]]\n elif self.clsf[i] == \"Commissioned\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][9],\n self.emp_dict[i][10],self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n 
self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3],self.emp_dict[i][4],self.emp_dict[i][7],self.emp_dict[i][9]]\n else:\n print(\"Error\")\n print(self.emp_data)\n return self.emp_data", "def create_employee_structure(employees):\n employees_dict = {}\n for employee in position_sort(employees):\n if not employee.is_secretary:\n adder(employees_dict, employee.prosecutors_office, {'employees': [], 'departments': {}, 'divisions': {}})\n if employee.prosecutors_office and employee.department and employee.division:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'divisions', {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office and employee.department:\n adder(employees_dict[employee.prosecutors_office]['departments'], employee.department, {})\n adder(employees_dict[employee.prosecutors_office]['departments'][employee.department], 'employees', [])\n employees_dict[employee.prosecutors_office]['departments'][employee.department]['employees'].append(employee)\n elif employee.prosecutors_office and employee.division:\n adder(employees_dict[employee.prosecutors_office]['divisions'], employee.division, [])\n employees_dict[employee.prosecutors_office]['divisions'][employee.division].append(employee)\n elif employee.prosecutors_office:\n employees_dict[employee.prosecutors_office]['employees'].append(employee)\n return employees_dict", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def atten_employee(list_emp, name):\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Employee Attendance Report:\\n\")\r\n for worker in list_emp:\r\n if worker.name == name:\r\n attendance_by_emp.write(\"%s-\\n\" % worker.name)\r\n for date in worker.attendance:\r\n attendance_by_emp.write(\"\\t\" + date + '\\n')\r\n print(\"Report issued!\\n\")\r\n return\r\n print(\"%s is not in employee log\\n\" % name)\r\n return", "def generate_salary_list(Hall):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, ('Hall Salary List: Hall %s' % Hall.hall_ID))\n pdf.ln()\n\n worker_list = dbr.rebuild(\"worker\")\n title = \"Role\"\n wage = 0\n for key in worker_list:\n if worker_list[key].hall_ID == Hall.hall_ID:\n if isinstance(worker_list[key], mess_manager.MessManager):\n title = \"Mess Manager\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], clerk.Clerk):\n title = \"Clerk\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], attendant.Attendant):\n title = \"Attendant\"\n wage = worker_list[key].daily_wage\n\n pdf.multi_cell(0, 5, ('%s: %s (%s) - Rs. 
%s' % (worker_list[key].worker_ID,\n worker_list[key].name, title, wage)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('hall_salary_%s.pdf' % Hall.hall_ID), 'F')", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def retrieve_teams():\n #print \"Print the number of teams and the members on team\"\n employee_list_total = []\n employee_number_list = []\n\n # List for keeping used numbers\n for temp in range(1000, 3000):\n employee_number_list.append([None, False]) \n\n # Read how many teams that shall be given\n stdin_input = sys.stdin.readline()\n \n try:\n # Test if input was numeric\n no_of_teams = int(stdin_input)\n \n input_rows = []\n \n # Read in all teams from stdin\n for i in range(0, no_of_teams):\n input_rows.append(sys.stdin.readline())\n \n except ValueError:\n print \"Error: Wrong input format\"\n sys.exit()\n\n for row in input_rows:\n # Split team into two members\n team = row.split()\n\n # Test if two members are given\n if len(team) != 2:\n print \"Error: Two team members must be given: Program will exit!\"\n sys.exit()\n\n temp_empl = [0, 0]\n \n try :\n # Loop both team members on row and check if the are in the list\n for i in range(0, 2):\n # Check for team on position teamnumber-1000\n if employee_number_list[int(team[i])-1000][1] == False:\n # Employee is not found in list, add it!\n temp_empl[i] = Employee(team[i]) \n employee_list_total.append(temp_empl[i])\n # Set employee to been found\n employee_number_list[int(team[i])-1000][1] = True\n # Set reference to the employee object \n employee_number_list[int(team[i])-1000][0] = temp_empl[i]\n else:\n # Retrive the employee object\n temp_empl[i] = employee_number_list[int(team[i])-1000][0]\n \n except ValueError:\n print \"Error: Input must be numeric. 
Program will exit!\"\n sys.exit()\n \n i = 0 \n for i in range(0, 2):\n # Add co_workers to respectivly employee\n if i == 0:\n temp_empl[i].add_co_worker(temp_empl[1])\n else:\n temp_empl[i].add_co_worker(temp_empl[0])\n \n # Return the list of employees\n return employee_list_total", "def generate_payslip_data(employee_data):\n payslip_data = []\n\n for employee in employee_data:\n gross_income = monthly_gross_income(employee['annual_salary'])\n income_tax = monthly_income_tax(\n employee['annual_salary'], tax_brackets)\n net_income = monthly_net_income(\n gross_income, income_tax)\n super_amount = monthly_super_amount(\n gross_income, employee['super_rate'])\n\n payslip_data.append({\n 'full_name': employee['first_name'] + ' ' + employee['last_name'],\n 'payment_period': employee['payment_period'],\n 'gross_income': gross_income,\n 'income_tax': income_tax,\n 'net_income': net_income,\n 'super_amount': super_amount\n })\n\n return payslip_data", "def make_employee_dict(names, ID_numbers, salaries, email_addresses):\r\n d = dict()\r\n for i in range(len(names)):\r\n d[ID_numbers[i]] = Employee(names[i], ID_numbers[i], salaries[i], email_addresses[i])\r\n return d", "def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def create_emp(self, name, pos, dept):\n if pos.upper() == 'MANAGER':\n self.create_manager(name, pos, dept)\n elif pos.upper() == 'SENIOR':\n self.create_senior(name, pos, dept)\n elif pos.upper() == 'JUNIOR':\n self.create_junior(name, pos, dept)\n else:\n self.create_trainee(name, pos, dept)", "def generate_milestone_data(supervisor_employee_dict, all_employee_dict, run_date):\n supervisor_milestone_list = []\n for supervisor_id in supervisor_employee_dict:\n supervisor_milestone_dict = {}\n employees = supervisor_employee_dict[supervisor_id]\n employee_dict = {}\n\n milestone_counter = 0\n\n # Remove the supervisor from all the employees leaving the non-managers behind\n all_employee_dict.pop(supervisor_id, None)\n supervisor_milestone_dict['supervisor_id'] = supervisor_id\n\n for emp in employees:\n hire_date = emp.get('hire_date')\n emp_id = emp.get('employee_id')\n anv_dates = calculate_anniversary_dates(\n hire_date,\n run_date\n )\n\n # This is built to support employees that share a common milestone date\n for date in anv_dates:\n group = employee_dict.setdefault(date, [])\n group.append(emp_id)\n\n # Sort the dict by date by converting into tuple and sorting\n milestone_tuple = [(v, k) for k, v in employee_dict.iteritems()]\n sorted_ms_tup = sorted(milestone_tuple, key=itemgetter(1))\n upcoming_milestone_list = []\n\n for employee_id_list, milestone_date in sorted_ms_tup:\n for emp_id in employee_id_list:\n\n # Do not print out more than 5 milestones\n if milestone_counter == 5:\n break\n\n upcoming_milestone = {\n 'employee_id': emp_id,\n 'anniversary_date': str(milestone_date)\n }\n upcoming_milestone_list.append(upcoming_milestone)\n milestone_counter += 1\n\n supervisor_milestone_dict['upcoming_milestones'] = upcoming_milestone_list\n supervisor_milestone_list.append(supervisor_milestone_dict)\n\n return 
supervisor_milestone_list, all_employee_dict", "def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict", "def create_member_objects(self, regh, keyspec, msg, user_data):\n logger.info(\"Creating member data objects !!!\")\n\n id = 1\n for id in range(1, 10):\n emp = RwDtsToyTaskletYang.Employee()\n emp.name = 'jdoe' + str(id)\n emp.age = 30 + id\n emp.phone = '978-863-00' + str(id)\n emp.ssn = '123-45-000' + str(id)\n path = '/rw-dts-toy-tasklet:employee[rw-dts-toy-tasklet:name=\\'jdoe' + str(id) + '\\']'\n status, ks = self.apih.keyspec_from_xpath(path)\n logger.debug(\"keyspec_from_xpath returned %s for path %s\", status, ks)\n status = self.mbdreg.create_element_keyspec(ks, emp.to_pbcm())\n logger.debug(\"create_element_keyspec returned %s for path %s\", status, ks)\n\n #Update an element\n path = '/rw-dts-toy-tasklet:employee[rw-dts-toy-tasklet:name=\\'jdoe9\\']'\n status, ks = self.apih.keyspec_from_xpath(path)\n logger.debug(\"keyspec_from_xpath returned %s for path %s\", status, ks)\n emp = RwDtsToyTaskletYang.Employee()\n emp.name = 'jdoe9' + str(id)\n emp.age = 41\n emp.phone = '978-863-099'\n emp.ssn = '123-45-0099'\n status = self.mbdreg.update_element_keyspec(ks, emp.to_pbcm(), RwDts.XactFlag.REPLACE)\n logger.info(\"Updated the object with key = %s status = %s\", path, status)\n\n # Now read it back\n status, out_ks, pbcm = self.mbdreg.get_element_keyspec(ks)\n logger.info(\"Get returned status=%s, pbcm=%s out_ks = %s\", status, pbcm, out_ks)\n employee = RwDtsToyTaskletYang.Employee.from_pbcm(pbcm)\n logger.info(\"Read record is %s\", employee)\n\n # Now read with xpath\n status, pbcm,out_ks = self.mbdreg.get_element_xpath('C,/rw-dts-toy-tasklet:employee[rw-dts-toy-tasklet:name=\\'jdoe8\\']')\n logger.info(\"Get returned using xpath status=%s pbcm=%s out_ks = %s\", status, pbcm, out_ks)\n employee = RwDtsToyTaskletYang.Employee.from_pbcm(pbcm)\n logger.info(\"Read record using xpath is %s\", employee)\n\n # Get a cursor and walk the list\n cursor = self.mbdreg.get_cursor()\n msg, ks = self.mbdreg.get_next_element(cursor)\n\n while msg is not None:\n employee = RwDtsToyTaskletYang.Employee.from_pbcm(msg)\n logger.info(\"Read record using get next api %s\", employee)\n msg, ks = self.mbdreg.get_next_element(cursor)\n \n self.mbdreg.delete_cursors()", "def make_salaried(self,salary,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"2\"\n print(\"{}{}\".format(name,\" was successfully changed to be a salaried employee\"))\n self.emp_dict[id][7] = salary\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def atten_date(list_emp, name, start_rep, end_rep):\r\n with open(\"attendance_log.txt\", \"w\") as attendance_by_emp:\r\n # writes new\\re writes attendance_log from the beginning\r\n attendance_by_emp.seek(0)\r\n attendance_by_emp.write(\"Employee Attendance Report %s-%s:\\n\" % 
(start_rep, end_rep))\r\n for worker in list_emp:\r\n if worker.name == name:\r\n # found worker name in list\r\n attendance_by_emp.write(\"%s-\\n\" % worker.name)\r\n for date in worker.attendance:\r\n # writing dates in same representation for comparison\r\n date_log = time.strptime(date[:10:], \"%d/%m/%Y\")\r\n start_date = time.strptime(start_rep, \"%d/%m/%Y\")\r\n end_date = time.strptime(end_rep, \"%d/%m/%Y\")\r\n # comparing dates\r\n if date_log > start_date:\r\n if date_log < end_date:\r\n attendance_by_emp.write(\"\\t\" + date + '\\n')\r\n # finished going through dates\r\n print(\"Report issued!\\n\")\r\n return\r\n # worker not found in list\r\n print(\"Sorry, worker not in log\")\r\n return", "def make_individual_agents_2016(self):\r\n for hh_row in agents: # agents is a list of ints 1-94 from excel_import\r\n individual_id_list = return_values(hh_row, 'name')\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.hh_id = hh_id\r\n agelist = return_values(hh_row, 'age') # find the ages of people in hh\r\n genderlist = return_values(hh_row, 'gender')\r\n marriagelist = return_values(hh_row, 'marriage')\r\n educationlist = return_values(hh_row, 'education')\r\n income_local_off_farm = float(return_values(hh_row, 'income_local_off_farm'))\r\n income_local_off_farm_list[hh_row - 1] = income_local_off_farm\r\n household_income_list[hh_row - 1] = household_income_list[hh_row - 1] + income_local_off_farm\r\n if individual_id_list is not None and individual_id_list is not []:\r\n for i in range(len(individual_id_list)):\r\n self.individual_id = str(self.hh_id) + str(individual_id_list[i]) # example: 2c\r\n self.age = int(agelist[i])\r\n # if genderlist is not None and genderlist is not []:\r\n self.gender = int(genderlist[i])\r\n try:\r\n self.education = educationlist[i]\r\n except:\r\n self.education = 0\r\n self.marriage = marriagelist[i]\r\n IndividualAgent.create_initial_migrant_list(self, hh_row)\r\n self.age_at_step_0 = self.age\r\n self.income_local_off_farm = return_values(self.hh_row, 'income_local_off_farm')\r\n ind = IndividualAgent(hh_row, self, self.hh_id, self.individual_id, self.age, self.gender,\r\n self.education, self.marriage, self.past_hh_id, self.non_gtgp_area,\r\n self.step_counter, self.age_at_step_0, self.income_local_off_farm)\r\n self.schedule.add(ind)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def get_emp_data(self,employee):\n\t\temp = None\n\t\tfind_by = employee.find_elements_by_tag_name\n\t\tif str(type(employee)) != \"<type 'NoneType'>\" and main.is_desktop():\n\t\t\t# columns = employee.find_elements_by_tag_name(\"td\")\n\t\t\temp = {\n\t\t\t\t'name': find_by('td')[0].text,\n\t\t\t\t'id': find_by('td')[1].text,\n\t\t\t\t'status': find_by('td')[2].text,\n\t\t\t\t'election': find_by('td')[3].text,\n\t\t\t\t'date_changed': find_by('td')[4].text\n\t\t\t}\n\t\telif str(type(employee)) != \"<type 'NoneType'>\":\n\t\t\temp = {\n\t\t\t\t'name': find_by('div')[2].text,\n\t\t\t\t'id': find_by('div')[3].text[13:],\n\t\t\t\t'status': find_by('div')[4].text[8:], #Fail 4:20p, StaleEl\n\t\t\t\t'election': find_by('div')[5].text[17:], #Fail 4:15p, StaleEl\n\t\t\t\t'date_changed': find_by('div')[6].text[14:]\n\t\t\t}\n\n\t\t# raw_input(str(emp))\n\t\treturn emp", "def 
table_maker():\r\n try:\r\n off_copy = off.copy()\r\n man_copy = man.copy()\r\n exe_copy = exe.copy()\r\n ceo_copy = ceo.copy()\r\n list_of_lists = [off_copy, man_copy, exe_copy, ceo_copy]\r\n\r\n for i in list_of_lists:\r\n for j in i:\r\n if type(j) == str:\r\n continue\r\n else:\r\n raise ValueError('All elements must be strings')\r\n\r\n row_num = max(len(off_copy), len(man_copy),\r\n len(exe_copy), len(ceo_copy))\r\n for i in list_of_lists:\r\n if len(i) != row_num:\r\n diff = row_num - len(i)\r\n for j in range(diff):\r\n i.append('')\r\n\r\n t = PrettyTable(\r\n ['Office Workers', 'Managers', 'Executives', 'CEO'])\r\n for i in range(row_num):\r\n t.add_row([off_copy[i], man_copy[i], exe_copy[i], ceo_copy[i]])\r\n\r\n with open('Employee Table.txt', 'w') as f:\r\n f.write(str(t))\r\n\r\n except FileNotFoundError:\r\n print(\"Error: No file entered\")", "def __get_emptiest (self, person):\n doubles = []\n for scheduling_unit in person.get_scheduling_units ( ):\n doubles.append ((scheduling_unit, self.__calculate_emptiness (scheduling_unit)))\n \n doubles.sort (key=lambda trio: trio[1])\n \n scheduling_units = []\n for double in doubles:\n scheduling_units.append (double[0])\n \n return scheduling_units", "def create_data_structure(self, records):\n col_title = [[] , []]\n all_exp = {}\n\n # Get all employee info, used for obtain employee account info\n emp_info = self.env['hr.employee'].sudo().search([])\n\n def append_data_list():\n # {a: {b: 1}}\n for rec in records:\n current_emp_acc = ''\n for emp in emp_info:\n if emp.name == rec.expense_id.employee_name:\n current_emp_acc = emp.bank_account_id.acc_number\n print(\"this employee: \" + rec.expense_id.employee_name + \"'s Acc No. is: \" + current_emp_acc)\n cur = rec.expense_line_currency.currency\n if not cur == \"RMB\":\n cur = \"HKD\"\n\n cat_key = (str(rec.expense_cate_id.acc_no), str(rec.expense_debit_acc_name))\n emp_key = (\n str(rec.expense_id.employee_id.rmb_account_no) if cur == \"RMB\" else current_emp_acc,\n rec.employee_name\n )\n exp_key = (\n str(rec.expense_id.expense_create_date),\n str(rec.expense_id.expense_num),\n str(rec.expense_id.name)\n )\n amt = float(rec.expense_line_cost) if cur == \"RMB\" else float(rec.expense_line_calculate)\n\n # -----\n if cur not in all_exp:\n all_exp[cur] = {}\n if emp_key not in all_exp[cur]:\n all_exp[cur][emp_key] = {}\n if exp_key not in all_exp[cur][emp_key]:\n all_exp[cur][emp_key][exp_key] = {}\n\n all_exp[cur][emp_key][exp_key][cat_key] = all_exp[cur][emp_key][exp_key].get(cat_key, 0.0) + amt\n\n if cur == \"HKD\" and cat_key not in col_title[0]:\n col_title[0].append(cat_key)\n elif cur == \"RMB\" and cat_key not in col_title[1]:\n col_title[1].append(cat_key)\n for titles in col_title:\n titles.sort()\n\n return\n\n append_data_list()\n\n # for emp_dict in all_exp:\n # all_exp[emp_dict] = collections.OrderedDict(sorted(all_exp[emp_dict].items())) # sort exp data keys\n # all_exp = collections.OrderedDict(sorted(all_exp.items())) # sort all_exp emp data keys by ID\n\n # get the selected exp line id for later use\n with open('./select_rec.txt', 'w') as sr:\n for rec in records:\n id_str = str(rec.id) + '\\n'\n sr.write(id_str)\n rev_name = records[0].expense_id.rec_reviewer_name\n\n for cur in all_exp:\n self.create_exl_report(col_title[0] if cur == \"HKD\" else col_title[1], all_exp[cur], rev_name, cur)\n self.create_HSBC_txt(all_exp[cur], cur)\n\n\n # return\n\n #\n # self.create_HSBC_txt(all_exp)\n\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 
'hhexpense.anticipated.date',\n 'type': 'ir.actions.act_window',\n }", "def main():\n # create a list of test employees and managers\n testList = [\n {'type': 'employee', 'firstName': 'Mickey', 'lastName': 'Mouse', 'SSN': '100-12-3456', 'salary': 1500.00},\n {'type': 'manager', 'firstName': 'Walt', 'lastName': 'Disney', 'SSN': '100-00-0000', 'salary': 5000.00,\n 'title': 'Head Of Disneyland', 'yearBonus': 1000.00},\n {'type': 'employee', 'firstName': 'Donald', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 1000.00},\n {'type': 'manager', 'firstName': 'Minnie', 'lastName': 'Mouse', 'SSN': '999-99-999', 'salary': 10000.00,\n 'title': 'Head Of Mouse HouseHold', 'yearBonus': 15000.00},\n {'type': 'manager', 'firstName': 'Daisy', 'lastName': 'Duck', 'SSN': '100-65-4321', 'salary': 12000.00,\n 'title': 'Head Of Duck HouseHold', 'yearBonus': 10000.00}]\n\n # Define percentRaise (0.1 == 10%)\n percentRaise = 0.1\n\n # Create Employees and Managers Object using the Test data\n employeeList = loadEmployees(testList)\n\n # Sort employee List, which will ustilize Employee's __lt__ and __eq__ methods\n employeeList.sort()\n\n # Loop over Employee and Manager Objects\n print(\"Employees and Manager should be sorted by last name, then first\\n\")\n for employee in employeeList:\n if type(employee) == Manager:\n print(\"Manager:\")\n else:\n print(\"Employee:\")\n # Print Employee or Manager\n print(employee)\n # Give Raise to Employee or Manager\n employee.giveRaise(percentRaise)\n # Print New Salary\n print(\"With %.2f%% Raise, Salary: $%.2f\\n\" % (percentRaise * 100, employee.salary))\n\n # Employee docStrings\n print(\"\\nEmployee docstring for each method\")\n print(\"Employee.__doc__=\" + Employee.__doc__)\n print(\"Employee.__init__.__doc__=\" + Employee.__init__.__doc__)\n print(\"Employee.giveRaise.__doc__=\" + Employee.giveRaise.__doc__)\n print(\"Employee.__str__.__doc__=\" + Employee.__str__.__doc__)\n print(\"Employee.__eq__.__doc__=\" + Employee.__eq__.__doc__)\n print(\"Employee.__lt__.__doc__=\" + Employee.__lt__.__doc__)\n\n print(\"\\nManger docstring for each method\")\n print(\n \"Since Manager inherits from Employee, several of the methods ('giveRaise', '__eq__' and '__lt__') and the corresponding docstring will originate from the Employee class\\n\")\n print(\"Manager.__doc__=\" + Manager.__doc__)\n print(\"Manager.__init__.__doc__=\" + Manager.__init__.__doc__)\n print(\"Manager.giveRaise.__doc__=\" + Manager.giveRaise.__doc__)\n print(\"Manager.__str__.__doc__=\" + Manager.__str__.__doc__)\n print(\"Manager.__eq__.__doc__=\" + Manager.__eq__.__doc__)\n print(\"Manager.__lt__.__doc__=\" + Manager.__lt__.__doc__)", "def generate_24hrs():\n\n hrs = []\n\n for i in xrange(1, 25):\n hrs.append([i, 0])\n\n return hrs" ]
[ "0.6326731", "0.6174064", "0.6145489", "0.60907257", "0.5949069", "0.59248495", "0.5886534", "0.5878822", "0.5804044", "0.5747758", "0.57071424", "0.56668836", "0.5626319", "0.5592785", "0.5565836", "0.5512038", "0.55018", "0.5469139", "0.5452839", "0.5449946", "0.54412967", "0.53849924", "0.538053", "0.5379869", "0.5354524", "0.53181064", "0.530494", "0.5264882", "0.52588767", "0.5251533" ]
0.7532919
0
Customer data generation based on weighted random choices
def generateCustomers(self): # Counters shoppers = 0 models = 0 oldl = 0 oldf = 0 doctor = 0 nudist = 0 hippie = 0 nerd = 0 for i in range(self.num_of_customers): # With these weights, our store has plenty of youngs and olds, but few mids # Most grocery shoppers come in the evening # Young people have equal distribution between morning and evening # etc age1 = random.randint(18, 28) age2 = random.randint(28, 50) age3 = random.randint(50, 85) weighted_ages = [(age1, 10), (age2, 2), (age3, 15)] randomAge = [val for val, cnt in weighted_ages for a in range(cnt)] hour1 = random.randint(8, 13) hour2 = random.randint(13, 18) hour3 = random.randint(18, 22) weighted_hours = [(hour1, 10), (hour2, 3), (hour3, 20)] randomHour = [val for val, cnt in weighted_hours for b in range(cnt)] age = random.choice(randomAge) hour = random.choice(randomHour) gender = random.choice(['M', 'M', 'M', 'M', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F']) # Base chances, 100 total gs, sm, hp, ol, nrd, of, sd, nud = 20, 5, 5, 5, 5, 5, 10, 10 customerID = random.randint(0, self.num_of_customers*2) while customerID in self.all_customers: customerID = random.randint(0, self.num_of_customers*2) # Weights if 18 < age < 22: if gender == 'M': if 8 <= hour <= 12: ol, sm, nrd, hp = 2, 2, 35, 20 elif 13 <= hour <= 17: ol, sm, nrd, hp, gs = 2, 2, 15, 30, 5 elif 18 <= hour <= 22: ol, sm, gs = 2, 2, 50 elif gender == 'F': if 8 <= hour <= 12: ol, sm, nrd = 5, 35, 15 elif 13 <= hour <= 17: ol, sm, hp = 5, 30, 30 elif 18 <= hour <= 22: ol, sm, gs, = 5, 25, 50 elif gender == 'M' and 22 < age < 29: if 8 <= hour <= 12: ol, sm, nrd, hp = 5, 5, 35, 25 elif 13 <= hour <= 17: ol, sm, nrd, hp = 5, 5, 35, 40 elif 18 <= hour <= 22: ol, sm, nrd, hp, gs = 5, 5, 20, 20, 50 elif gender == 'M' and 29 < age < 50: if 8 <= hour <= 12: ol, sm, nrd, gs = 5, 5, 40, 30 elif 13 <= hour <= 17: ol, sm, nrd = 5, 5, 30 elif 18 <= hour <= 22: ol, sm, gs = 5, 5, 70 elif gender == 'M' and age > 50: if 8 <= hour <= 12: ol, sm, gs, of, hp = 5, 5, 30, 60, 20 elif 13 <= hour <= 17: ol, sm, gs, of, hp = 5, 5, 15, 70, 20 elif 18 <= hour <= 22: ol, sm, gs, of, hp = 5, 5, 50, 25, 20 elif gender == 'F' and 22 < age < 35: if 8 <= hour <= 12: ol, sm, hp, gs = 5, 30, 30, 30 elif 13 <= hour <= 17: ol, sm, hp, gs = 5, 30, 30, 15 elif 18 <= hour <= 22: ol, sm, hp, gs = 5, 15, 25, 60 elif gender == 'F' and 35 < age < 55: if 8 <= hour <= 12: ol, sm, hp, gs = 5, 5, 5, 40 elif 13 <= hour <= 17: ol, sm, hp, gs = 25, 5, 5, 25 elif 18 <= hour <= 22: ol, sm, hp, gs = 30, 5, 5, 40 elif gender == 'F' and age > 55: if 8 <= hour <= 12: ol, sm, of, gs = 20, 5, 15, 30 elif 13 <= hour <= 17: ol, sm, of, gs = 60, 5, 30, 15 elif 18 <= hour <= 22: ol, sm, of, gs = 40, 5, 20, 40 weighted_choices = [('Grocery Shopper', gs), ('Supermodel', sm), ('Hippie', hp), ('Old Lady', ol), ('Nerd', nrd), ('Self Doctor', sd), ('Nudist', nud), ('Old Fart', of)] randomType = [val for val, cnt in weighted_choices for n in range(cnt)] customer = random.choice(randomType) if customer == 'Grocery Shopper': shoppers += 1 num_of_purchases = random.randint(0, 20) foodChance = 18 medicalChance = 3 electronicsChance = 1 outdoorsChance = 1 clothingChance = 1 beautyChance = 2 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Supermodel': models += 1 num_of_purchases = random.randint(0, 20) foodChance = 0 medicalChance = 5 
electronicsChance = 0 outdoorsChance = 0 clothingChance = 10 beautyChance = 13 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Hippie': hippie += 1 num_of_purchases = random.randint(0, 20) foodChance = 6 medicalChance = 2 electronicsChance = 1 outdoorsChance = 14 clothingChance = 7 beautyChance = 1 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Old Lady': oldl += 1 num_of_purchases = random.randint(0, 20) foodChance = 6 medicalChance = 8 electronicsChance = 0 outdoorsChance = 0 clothingChance = 3 beautyChance = 10 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Nerd': nerd += 1 num_of_purchases = random.randint(0, 20) foodChance = 4 medicalChance = 3 electronicsChance = 14 outdoorsChance = 0 clothingChance = 2 beautyChance = 1 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Self Doctor': doctor += 1 num_of_purchases = random.randint(0, 20) foodChance = 5 medicalChance = 32 electronicsChance = 4 outdoorsChance = 1 clothingChance = 2 beautyChance = 1 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Nudist': nudist += 1 num_of_purchases = random.randint(0, 20) foodChance = 10 medicalChance = 5 electronicsChance = 0 outdoorsChance = 14 clothingChance = 0 beautyChance = 0 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] elif customer == 'Old Fart': oldf += 1 num_of_purchases = random.randint(0, 20) foodChance = 10 medicalChance = 18 electronicsChance = 5 outdoorsChance = 3 clothingChance = 3 beautyChance = 0 self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer) self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases] itemsBought = (", ".join(repr(e) for e in self.customer_purchases)) self.c.execute("INSERT INTO Customer (CustomerID, Hour, Age, Gender, Items) VALUES (?, ?, ?, ?, ?)", (customerID, hour, age, gender, itemsBought)) self.conn.commit() if self.print_counters: print("\nShoppers:", shoppers) print("Models:", models) print("Old Ladies:", oldl) print("Old Farts:", oldf) print("Self doctors:", doctor) print("Nerds:", nerd) print("Hippies:", hippie) print("Nudists:", nudist) if self.print_customers: print("\nRaw Customer Data: ") print(self.all_customers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generatePurchases(self, num_of_purchases, food, medical, electronics, outdoors, clothing, beauty, customer):\r\n\r\n # Empty purchases\r\n self.customer_purchases = []\r\n\r\n # Customer is *likely* to buy from some categories, but anything can happen\r\n weighted_categories = [('Food', food), ('Medical', medical), ('Electronics', electronics), ('Outdoors', outdoors), ('Clothing', clothing), ('Beauty', beauty)]\r\n randomCategory = [val for val, cnt in weighted_categories for i in range(cnt)]\r\n\r\n # Buy items\r\n for i in range(num_of_purchases):\r\n choice = random.choice(randomCategory)\r\n if choice == 'Food':\r\n tempIndex = random.randint(self.num_of_items, self.num_of_items*2-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Medical':\r\n tempIndex = random.randint(self.num_of_items*2, self.num_of_items*3-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Electronics':\r\n tempIndex = random.randint(3*self.num_of_items, 4*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Outdoors':\r\n tempIndex = random.randint(4*self.num_of_items, 5*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Clothing':\r\n tempIndex = random.randint(5*self.num_of_items, 6*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n elif choice == 'Beauty':\r\n tempIndex = random.randint(6*self.num_of_items, 7*self.num_of_items-1)\r\n self.customer_purchases.append(tempIndex)\r\n\r\n if self.print_customers:\r\n print(self.customer_purchases)", "def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def generate_customer(self):\n customer_rates = np.random.multivariate_normal(\n mean=self.behave_means, cov=self.behave_cov\n )\n customer_rates = customer_rates.clip(\n min=self.min_rate\n ) # clip : no negative rates!\n new_customer = Customer(customer_rates)\n # print(customer_rates)\n return new_customer", "def generateSupplies(self):\n typePart = ['wrench','resistor','bulb','mushroom','coin']\n chosenPart = []\n for i in range(3):\n randomPart = choice(typePart)\n chosenPart.append(randomPart)\n typePart.remove(randomPart)\n for part in chosenPart:\n amount = randint(1,3)\n self._supplies.append(Node(part,amount))", "def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. 
Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1", "def weightedChoice(weights, objects, apply_softmax=False, alpha=None):\n if apply_softmax: weights = softmax(weights)\n if alpha: weights = normalize([w**alpha for w in weights])\n cs = np.cumsum(weights) #An array of the weights, cumulatively summed.\n idx = sum(cs < np.random.rand()) #Find the index of the first weight over a random value.\n idx = min(idx, len(objects)-1)\n return objects[idx]", "def _random_customer(cust_dtls) -> tuple:\n return choices(cust_dtls)[0]", "def gen_data(self, amount):\n\n return random.choices(self.indices, weights=self.weights, k=amount)", "def weighted_bright():\n return np.random.choice(\n [17, 18, 19, 20, 21] * 1 +\n [22, 23, 24, 25, 26] * 2 +\n [27, 28, 29, 30, 31] * 3\n )", "def _random_weight(self):\n return random.uniform(MIN_WEIGHT, MAX_WEIGHT)", "def weighted_choice(weights):\n totals = []\n running_total = 0\n\n for w in weights:\n running_total += w\n totals.append(running_total)\n\n rnd = random.random() * running_total\n for i, total in enumerate(totals):\n if rnd < total:\n return i", "def weighted_choice(weighted_items, num_items=1):\n total = 0\n cume_list = []\n\n for item, weight in weighted_items.items():\n total += weight\n cume_list.append([item, total])\n\n for pair in cume_list:\n pair[1] /= total\n\n items = []\n\n for _ in range(num_items):\n rand = random()\n\n for item, val in cume_list:\n if rand <= val:\n items.append(item)\n break\n\n assert num_items == len(items), (weighted_items, items)\n\n if num_items == 1:\n return items[0]\n\n return items", "def weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def choice(population,weights):\r\n\tassert len(population) == len(weights)\r\n\tcdf_vals=cdf(weights)\r\n\treturn population[bisect.bisect(cdf_vals, random.random())]", "def random_choice(options, weights): \n r = random.random()\n for i, c in enumerate(cumsum(weights)):\n if r <= c:\n return options[i]", "def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)", "def weightedrandomchoice(items): # {{{2\n total = 0\n items.sort(reverse=True, key=lambda x:x[0])\n for item in items:\n total += item[0]\n threshold = random.uniform(0, 0.6) * total\n for item in items:\n threshold -= item[0]\n if threshold <= 0:\n return item[1]", "def make_drink ():\n \n customer_pref = customer_order.drink_order()\n drink = []\n \n for pref in customer_pref:\n if customer_pref[pref] == True:\n drink.append(random.choice(ingredients[pref]))\n \n return drink", "def 
weighted_choice(items):\n weight_total = sum((item[1] for item in items))\n n = random.uniform(0, weight_total)\n for item, weight in items:\n if n < weight:\n return item\n n = n - weight\n return item", "def weighted_choice(items: List[Tuple[str, float]]) -> str:\r\n total_weight = sum(item[1] for item in items)\r\n n = random.uniform(0, total_weight)\r\n for item, weight in items:\r\n if weight > n:\r\n return item\r\n n -= weight\r\n return item", "def weightedRandFromDict(dictionary):\n items = dictionary.items()\n keys = [item[0] for item in items]\n weights = [item[1] for item in items]\n return choices(keys,weights=weights,k=1)[0]", "def randomizeWeights(self, rand_distr):\n raise NotImplementedError", "def _weighted_choice(self, lst):\n \n total_weight = reduce(lambda x,y:x+y, [tup[1] for tup in lst])\n n = random.uniform(0, total_weight)\n for item, weight in lst:\n if n < weight:\n break\n n = n - weight\n return item", "def calculate_weighted_results():\n pass", "def random_weight():\n # We found that random.randrange(-1,2) to work well emperically \n # even though it produces randomly 3 integer values -1, 0, and 1.\n return random.randrange(-1, 2)\n\n # Uncomment the following if you want to try a uniform distribuiton \n # of random numbers compare and see what the difference is.\n # return random.uniform(-1, 1)", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def weighted_choice(choices):\n return choice(sum(([value]*weight for value, weight in choices), []))", "def makeRandomSet(weightedSet): \n\tnewtrainingSet = []\n\t\n\t#Make a list starting with zero and ending with MAX_INT\n\t#Where all of values inbetween are the probs of choosing this element\n\tprobs = [0]\n\tfor ex in weightedSet:\t\t\n\t\tprobs.append(ex.weight + probs[-1])\n\t\n\tprobs.append(_MAX_INT)\n\t\t\n\tfor i in range(len(weightedSet)): #Get N new smaples\n\t\trandNum = _random()\n\t\tfor j in range(len(probs) - 1 ):\t\n\t\t\tif randNum >= probs[j] and probs[j+1] > randNum : \n\t\t\t\tnewtrainingSet.append(boostExample(\n\t\t\t\t\tLabeledExample(weightedSet[j-1],label=weightedSet[j-1].label) ,weightedSet[j-1].weight) )\n\t\t\t\tbreak\n\t\t\n\t\n\treturn newtrainingSet", "def weighted_choice(options: np.ndarray, weights: np.ndarray) -> Union[int,float]:\n assert len(options) == len(weights) != 0\n total = np.sum(weights)\n rand = np.random.rand() * total\n for i in range(len(options)):\n option = options[i]\n weight = weights[i]\n if weight < rand:\n rand -= weight\n else:\n break\n return option" ]
[ "0.67562157", "0.6674905", "0.66366154", "0.64034915", "0.63300693", "0.6307731", "0.62676364", "0.61813587", "0.61580294", "0.6153797", "0.6138154", "0.61247575", "0.60950094", "0.60847926", "0.60655224", "0.60615486", "0.6052468", "0.6047676", "0.59948903", "0.59820014", "0.59640044", "0.5914096", "0.58941334", "0.5873881", "0.5866264", "0.5859825", "0.5848816", "0.5819095", "0.5764863", "0.5760165" ]
0.75274
0
This function calculates the Fourier Transform of a specific signal
def DFT(signal): n = signal.shape[0] omega = np.exp(((((-2) * np.pi)*1j) / n)) e_items = np.vander(omega**np.arange(n), n, True) fourier_signal = np.dot(e_items, signal) return fourier_signal.astype(np.complex128)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numpyFourierTransform(self,graph):\n z=[complex(*graph[i]) for i in range(len(graph))]\n return np.fft.fft(z)", "def FourierTransform(data, nPoints):\r\n tdf = np.fft.fft(data, nPoints)\r\n return tdf", "def fourier_transform(signal, fs):\n freqs = np.fft.rfftfreq(4*len(signal), 1/fs)\n fft = np.abs(np.fft.rfft(signal, 4*len(signal)))\n return freqs, fft", "def DFT(signal):\n signal = signal.astype(np.float64)\n # find the length of the signal\n N = signal.shape[0]\n if signal.ndim == 2:\n M, N = signal.shape\n\n # calculate DFT matrix\n u, v = np.meshgrid(np.arange(N), np.arange(N))\n omega = np.exp(-2 * np.pi * 1j / N)\n dft_matrix = np.power(omega, u*v)\n\n # if it is a matrix of signals\n if signal.ndim == 2:\n # calculate the Fourier Transform\n complex_fourier_signal = np.dot(dft_matrix, signal.transpose())\n return complex_fourier_signal.transpose()\n\n # calculate the Fourier Transform\n complex_fourier_signal = np.dot(dft_matrix, signal)\n return complex_fourier_signal", "def toFourier(self):\n\n\t\tif self.space==\"fourier\":\n\t\t\tpass \n\t\telse:\n\t\t\tself.data = fftengine.rfft2(self.data)\n\t\t\tself.space=\"fourier\"", "def ManualFourier(x):\n x = np.asarray(x, dtype=float)\n N = x.shape[0]\n\n if np.log2(N) % 1 > 0:\n raise ValueError(\"size of x must be a power of 2\")\n\n # N_min here is equivalent to the stopping condition above,\n # and should be a power of 2\n N_min = min(N, 32)\n\n # Perform an O[N^2] DFT on all length-N_min sub-problems at once\n n = np.arange(N_min)\n k = n[:, None]\n M = np.exp(-2j * np.pi * n * k / N_min)\n X = np.dot(M, x.reshape((N_min, -1)))\n\n # build-up each level of the recursive calculation all at once\n while X.shape[0] < N:\n X_even = X[:, :int(X.shape[1] / 2)]\n X_odd = X[:, int(X.shape[1] / 2):]\n factor = np.exp(-1j * np.pi * np.arange(X.shape[0])\n / X.shape[0])[:, None]\n X = np.vstack([X_even + factor * X_odd,\n X_even - factor * X_odd])\n return X.ravel()", "def spectrum_fourier(self):\r\n\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n fft = fftpack.fft\r\n if np.any(np.iscomplex(data)):\r\n # Get negative frequencies, as well as positive:\r\n f = np.linspace(-sampling_rate/2., sampling_rate/2., data.shape[-1])\r\n spectrum_fourier = np.fft.fftshift(fft(data))\r\n else:\r\n f = tsu.get_freqs(sampling_rate, data.shape[-1])\r\n spectrum_fourier = fft(data)[..., :f.shape[0]]\r\n \r\n return f, spectrum_fourier", "def IDFT(fourier_signal):\n fourier_signal = fourier_signal.astype(np.complex128)\n # find the length of the signal\n N = fourier_signal.shape[0]\n if fourier_signal.ndim == 2:\n M, N = fourier_signal.shape\n\n # calculate IDFT matrix\n u, v = np.meshgrid(np.arange(N), np.arange(N))\n omega = np.exp(2 * np.pi * 1j / N)\n idft_matrix = np.power(omega, u*v)\n\n # if it is a matrix of fourier signals\n if fourier_signal.ndim == 2:\n # calculate the Fourier Transform\n signal = np.dot(idft_matrix, fourier_signal.transpose())\n return 1/N * signal.transpose()\n\n # calculate the inverse Fourier Transform\n signal = np.dot(idft_matrix, fourier_signal)\n return 1/N * signal", "def _irfft2d(f_x) :", "def fft(signal):\r\n if signal.size == 1:\r\n return signal\r\n\r\n even_part = fft(signal[::2]) # Only grab even elements\r\n odd_part = fft(signal[1::2]) # Only grab odd elements\r\n\r\n factor = np.exp(-2j * np.pi * np.arange(signal.size) / signal.size)\r\n return np.concatenate([even_part + factor[:int(signal.size / 2)] * odd_part,\r\n even_part + factor[int(signal.size / 2):] * odd_part])", 
"def fourier(data, temp_freq, axis, output = 'amplitude'):\n\t\t\n\t\n\t# take largest possible multiple of F1 from PSTH.\n\t# Generate freq and fft\n\t# generate amplitude\n\t# return amplitude, F0, F1 and F2 values", "def fft(self, z, out=None):\n z = np.asfarray(z)\n out = np.multiply(z, z, out)\n out *= -0.5\n np.exp(out, out)\n return out", "def FT(x, ufunc=np.real, real=False):\n assert isinstance(x, np.ndarray)\n if len(x.shape) == 1:\n if real:\n F = np.fft.rfft(x)\n else:\n F = np.fft.fft(x)\n elif len(x.shape) == 2:\n if real:\n F = np.fft.rfft2(x)\n else:\n F = np.fft.fft2(x)\n else:\n raise TypeError(\"The array should be 1D or 2D\")\n return ufunc(np.fft.fftshift(F))", "def fourier_der(im):\n ft_img = DFT2(im)\n ft_img = np.fft.fftshift(ft_img)\n\n n_x = im.shape[1]\n coeff_x = (2 * np.pi * 1j)/n_x\n u_freq = np.array([n if n < int(n_x/2) else (n-n_x) for n in range(n_x)]) * 1j\n u_freq = np.array([np.fft.fftshift(u_freq)]*im.shape[0]).transpose()\n dx_ft = coeff_x * IDFT2(np.fft.ifftshift(u_freq.transpose() * ft_img))\n\n m_y = im.shape[0]\n coeff_y = (2 * np.pi * 1j)/m_y\n v_freq = np.array([m if m < int(m_y/2) else (m-m_y) for m in range(m_y)]) * 1j\n v_freq = np.array([np.fft.fftshift(v_freq)] * im.shape[1]).transpose()\n tr = IDFT2(np.fft.ifftshift(v_freq * ft_img))\n dy_ft = coeff_y * tr\n\n magnitude = np.sqrt(np.abs(dx_ft)**2 + np.abs(dy_ft)**2)\n return magnitude.real.astype(np.float64)", "def fft(self, *args, **kwargs):\n return _image.image_fft(self, *args, **kwargs)", "def forward_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n #Fourier Transform matrix:\n ft = np.zeros([x,y], complex)\n count =0\n for u in range(0, x):\n for v in range(0, y):\n sum_ft = 0\n for i in range(0, x):\n for j in range(0, y):\n\n sum_ft = sum_ft + matrix[i, j] * (np.cos(((2*np.pi)/N)*(u*i + v*j)) - 1j*np.sin(((2*np.pi)/N)*(u*i + v*j)))\n\n ft[u, v] = sum_ft\n\n #print(u, v)\n if u != 0 and v != 0 and (u <= int(x/2) and v < int(y/2)):\n\n ft[(x - u), (y - v)] = np.real(ft[u, v]) - np.imag(ft[u, v] * 1j)\n count = count + 2\n \n if count == x*y:\n return ft\n return ft", "def IDFT(fourier_signal):\n n = fourier_signal.shape[0]\n omega = np.exp((((2 * np.pi)*1j) / n))\n\n e_items = np.vander(omega**np.arange(n), n, True)\n org_signal = np.dot(e_items, fourier_signal)/n\n\n return org_signal", "def fft(input, inverse=False):\n if not iscomplex(input):\n raise(TypeError('The input should be complex (e.g. 
last dimension is 2)'))\n if inverse:\n return torch.ifft(input, 3)\n return torch.fft(input, 3)", "def FourierSinusoids(F,w,Fs,synthesis=None):\n if synthesis==None:\n synthesis=0;\n \n Ts=1.0/Fs; \n xs=numpy.arange(0,1,Ts) \n \n signal=numpy.zeros(np.shape(xs));\n for i in range(len(F)):\n omega=2*np.pi*F[i];\n signal = signal+ w[i]*numpy.cos(-omega*xs);\n #plot the time domain signal \n subplot(2,1,1)\n plt.plot(range(0,len(signal)),signal)\n xlabel('Time')\n ylabel('Amplitude')\n title('time doman')\n \n #compute the fourier series coefficient\n r1=FourierSeries(signal)\n a1=cabs(r1)\n \n if synthesis==0:\n #plot the freuency domain signal\n L=len(a1);\n fr=np.arange(0,L);\n subplot(2,1,2)\n plt.stem(fr,a1,'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')\n title('complete signal')\n ticks=np.arange(0,L+1,25);\n plt.xticks(ticks,ticks); \n show() \n \n if synthesis==1:\n rsignal=IFourierSeries(r1);\n print np.allclose(rsignal, signal) \n subplot(2,1,2) \n plt.stem(xs,signal)\n xlabel('Time')\n ylabel('Amplitude')\n title('reconstructed signal')\n show()", "def ft(x):\n y = np.fft.rfft(x)\n\n phi = 2 * np.pi * np.random.random(len(y))\n\n phi[0] = 0.0\n if len(x) % 2 == 0:\n phi[-1] = 0.0\n\n y = y * np.exp(1j * phi)\n return np.fft.irfft(y, n=len(x))", "def calcFFT(self):\n try:\n arr = np.fromstring(self.fqueue, dtype=np.float32)\n # Slice for improved performance [200:len(arr)/2:50]#[len(arr)/2:]\n fft = np.fft.fft(arr)[len(arr)/2:]\n f = lambda x: 100 + 20. * log(abs(x))\n mag = map(f, fft)\n\n self.sock.send('f' + json.dumps({\n 'nsamp': len(mag),\n 'samples': mag}))\n self.fqueue = \"\"\n self.fcount = 0\n except:\n pass", "def fourier_coefficient(t, freq, n=1, axis=-1):\n t, freq, n = map(np.asanyarray, (t, freq, n))\n return np.exp(1j * 2 * np.pi * freq * n * t).sum(axis=axis)", "def fft_fun(data, func=set_segments_to_value, fn_args=[[(1400, None)]]):\n from numpy.fft import rfft, irfft\n\n rfft_data = rfft(data)\n func(rfft_data, *fn_args)\n return irfft(rfft_data)", "def DFTpower(time, signal, f0=None, fn=None, df=None, full_output=False):\n\n freqs = np.arange(f0,fn,df)\n Ntime = len(time)\n Nfreq = int(np.ceil((fn-f0)/df))\n \n A = np.exp(1j*2.*pi*f0*time) * signal\n B = np.exp(1j*2.*pi*df*time)\n ft = np.zeros(Nfreq, complex) \n ft[0] = A.sum()\n for k in range(1,Nfreq):\n A *= B\n ft[k] = np.sum(A)\n \n if full_output:\n return freqs,ft**2*4.0/Ntime**2\n else:\n return freqs,(ft.real**2 + ft.imag**2) * 4.0 / Ntime**2", "def spatialFourierTransform(self, Amplitude):\n return self.ft_coefficient * (np.fft.fftn(Amplitude))", "def DFTdirect(x):\n \n N = len(x)\n X = []\n for k in range(N):\n X_k = Complex(0,0)\n for n in range(N):\n a = 2*math.pi*k*n/N\n X_k += x[n]*Complex(math.cos(a), -math.sin(a))\n X.append(X_k)\n return X", "def get_fft(signal):\n fs = 100.\n Fk = np.fft.rfft(signal)/len(signal)\n \n f = np.fft.rfftfreq(len(signal), 1/fs)\n #remove the noise\n Fk[0] = 0\n \n return Fk, f", "def get_fft(self):\n\t\t# Get the \"ideal\" evenly spaced times\n\t\teven_times = numpy.linspace(self.buf[0][0], self.buf[-1][0], len(self.buf))\n\t\t\n\t\t# Interpolate the data to generate evenly temporally spaced samples\n\t\tinterpolated = numpy.interp(even_times, *zip(*self.buf))\n\t\t\n\t\t# Perform the FFT\n\t\tfft = numpy.fft.rfft(interpolated)\n\t\treturn zip(numpy.abs(fft), numpy.angle(fft))", "def fft2(X):\r\n # return scipy.fftpack.fft2(X)\r\n return np.fft.fft2(X)", "def frft(f, a):\n ret = numpy.zeros_like(f, dtype=numpy.complex)\n f = 
f.copy().astype(numpy.complex)\n N = len(f)\n shft = numpy.fmod(numpy.arange(N) + numpy.fix(N / 2), N).astype(int)\n sN = numpy.sqrt(N)\n a = numpy.remainder(a, 4.0)\n\n # Special cases\n if a == 0.0:\n return f\n if a == 2.0:\n return numpy.flipud(f)\n if a == 1.0:\n ret[shft] = numpy.fft.fft(f[shft]) / sN\n return ret\n if a == 3.0:\n ret[shft] = numpy.fft.ifft(f[shft]) * sN\n return ret\n\n # reduce to interval 0.5 < a < 1.5\n if a > 2.0:\n a = a - 2.0\n f = numpy.flipud(f)\n if a > 1.5:\n a = a - 1\n f[shft] = numpy.fft.fft(f[shft]) / sN\n if a < 0.5:\n a = a + 1\n f[shft] = numpy.fft.ifft(f[shft]) * sN\n\n # the general case for 0.5 < a < 1.5\n alpha = a * numpy.pi / 2\n tana2 = numpy.tan(alpha / 2)\n sina = numpy.sin(alpha)\n f = numpy.hstack((numpy.zeros(N - 1), sincinterp(f), numpy.zeros(N - 1))).T\n\n # chirp premultiplication\n chrp = numpy.exp(-1j * numpy.pi / N * tana2 / 4 *\n numpy.arange(-2 * N + 2, 2 * N - 1).T ** 2)\n f = chrp * f\n\n # chirp convolution\n c = numpy.pi / N / sina / 4\n tmp = numpy.exp(1j * c * numpy.arange(-(4 * N - 4), 4 * N - 3).T ** 2)\n ret = scipy.signal.fftconvolve(\n numpy.exp(1j * c * numpy.arange(-(4 * N - 4), 4 * N - 3).T ** 2),\n f\n )\n ret = ret[4 * N - 4:8 * N - 7] * numpy.sqrt(c / numpy.pi)\n\n # chirp post multiplication\n ret = chrp * ret\n\n # normalizing constant\n ret = numpy.exp(-1j * (1 - a) * numpy.pi / 4) * ret[N - 1:-N + 1:2]\n\n return ret" ]
[ "0.7334551", "0.73286504", "0.73249704", "0.72870594", "0.7284747", "0.72186106", "0.7010064", "0.6952721", "0.6820437", "0.6792979", "0.6747964", "0.6740338", "0.67176574", "0.6682443", "0.66758895", "0.6668562", "0.66177744", "0.65891063", "0.6576105", "0.657266", "0.6550333", "0.64981914", "0.6497019", "0.6494018", "0.6457977", "0.6445621", "0.6445451", "0.6444788", "0.64426816", "0.6429371" ]
0.75178254
0
This function calculates the magnitude of the derivative of an image using convolution
def conv_der(im): derevitive_conv = np.array([[1], [-1]]) dx = scipy.signal.convolve2d(im, derevitive_conv, 'same') dy = scipy.signal.convolve2d(im, derevitive_conv.transpose(), 'same') magnitude = np.sqrt(np.abs(dx)**2 + np.abs(dy)**2) return magnitude.real.astype(np.float64)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conv_der(im):\n im = im.astype(np.float64)\n # set der x/y matrix\n der_x = np.array([[1, 0, -1]])\n der_y = np.array(der_x.transpose())\n # calculate the derivative to x and y\n dx = conv(im, der_x, mode='same')\n dy = conv(im, der_y, mode='same')\n\n return np.sqrt(np.abs(dx)**2 + np.abs(dy)**2) # = magnitude", "def convDerivative(inImage: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\r\n kernel_x = np.array([[0, 0, 0], [1, 0, -1], [0, 0, 0]])\r\n kernel_y = np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\r\n\r\n # derivative by rows:\r\n Ix = cv2.filter2D(inImage, -1, kernel_x) # , borderType=cv2.BORDER_REPLICATE\r\n\r\n # derivative by columns:\r\n Iy = cv2.filter2D(inImage, -1, kernel_y)\r\n\r\n eps = 0.0000000001\r\n magnitude = pow(Ix ** 2 + Iy ** 2, 0.5)\r\n direction = np.arctan(Iy / (Ix + eps))\r\n\r\n return direction, magnitude, Ix, Iy", "def compute_gradmag(image_arr):\n assert image_arr.ndim == 2\n dy = sobel(image_arr, axis=0)\n dx = sobel(image_arr, axis=1)\n return np.hypot(dx, dy)", "def backward_energy(im):\n\n xgrad = ndi.convolve1d(im, np.array([1, 0, -1]), axis=1, mode='wrap')\n ygrad = ndi.convolve1d(im, np.array([1, 0, -1]), axis=0, mode='wrap')\n\n grad_mag = np.sqrt(np.sum(xgrad ** 2, axis=2) + np.sum(ygrad ** 2, axis=2))\n\n return grad_mag", "def fftdeconvolve(image, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpy.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x / y))))", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def compute_derivatives(im1, im2):\n assert im1.shape == im2.shape\n \n Ix = np.empty_like(im1)\n Iy = np.empty_like(im1)\n It = np.empty_like(im1)\n\n #\n # Your code here\n #\n \n # Taken from: Lecture 3 (filtering continued) - Slide 39\n # print(\"Calculating convolutions for derivatives. This might take a while.\")\n # D_x = 1/6 * np.array([[1, 0, -1], [1, 0, -1], [1, 0, -1]])\n # D_y = 1/6 * np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])\n\n # Vereinfachte Kernel. 
Haben kein smoothing, nur die Ableitung\n D_x = 1/2 * np.array([1, 0, -1]).reshape((1,3))\n D_y = 1/2 * np.array([1, 0, -1]).reshape((3,1))\n\n \n Ix = convolve2d(im1, D_x, mode=\"same\", boundary=\"symm\")\n Iy = convolve2d(im1, D_y, mode=\"same\", boundary=\"symm\")\n It = im2 - im1\n\n # Debugging\n ## print(\"Following prints should all have the same shape: \")\n ## print(\"shape Im: \", im1.shape)\n ## print(\"shape Ix: \", Ix.shape)\n ## print(\"shape Iy: \", Iy.shape)\n ## print(\"shape It: \", It.shape)\n ## print(\"\\n\")\n\n assert Ix.shape == im1.shape and \\\n Iy.shape == im1.shape and \\\n It.shape == im1.shape\n\n return Ix, Iy, It", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def moffat_convolution_fft(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. 
*math.sqrt(2.**(1./beta)-1.))\n\n im_kernel_array = moffat_kernel(n_fwhm,beta,r_s)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def fourierCV(img):\n\tgray = grayscale(img)\n\tdft = cv2.dft(np.float32(gray), flags = cv2.DFT_COMPLEX_OUTPUT)\n\tdft_shift = np.fft.fftshift(dft)\n\tmagnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))\n return magnitude_spectrum", "def deconvolution(obs, green, lambd):\n\n nr, nt = obs.shape\n num = np.zeros(nt)\n den = np.zeros(nt)\n\n for ir in range(len(obs)):\n\n OBS = fft(obs[ir, :])\n GRE = fft(green[ir, :])\n\n # Sum all\n num = num + np.conj(GRE) * OBS\n den = den + np.conj(GRE) * GRE\n\n # Get maximum value of denominator\n maxden = np.max(np.abs(den))\n\n # Waterlevel\n wl = lambd * maxden\n\n # Deconvolution using the waterlevel\n src = np.real(ifft(num / (den+wl).T))\n\n # Compute fit to original data\n res = obs\n chi0 = 0.5 * np.sum(np.sum(res ** 2))\n\n syn = compute_synth(green, src)\n res = obs - syn\n chi = 0.5 * np.sum(np.sum(res ** 2))\n\n print(chi/chi0)\n\n return src, syn", "def magnitude_of_gradient(grad_f):\n return np.sqrt(np.ufunc.reduce(np.add, [x**2 for x in grad_f]))", "def clConvolution(self, size, mask):", "def ddx(a):\n\t# avoid corner effects\n\tthick = 2\n\ta = np.concatenate((a[(thick-1)::-1],a,a[:-(thick+1):-1]))\n\tmode=\"same\"\n\tmode = \"valid\"\n\t\n\tK = np.array([-0.5,0,0.5])\n\tda = scig.convolve(a,K,mode=mode)\n\treturn da", "def image_derivatives(image):\n\t\n\tsobel_sign = np.array([[-1, 0, 1]])\n\tsobel_mag = np.array([[1, 2, 1]])\n\n\ttemp1 = conv2d(image, sobel_sign)\n\timage_dx = conv2d(temp1, sobel_mag.T)\n\n\ttemp2 = conv2d(image, sobel_mag)\n\timage_dy = conv2d(temp2, -sobel_sign.T)\n\t\n\treturn image_dx, image_dy\n\n\t# save these for comparison\n\timage_dx_1, image_dy_1 = image_dx, image_dy\n\n\t# Slower alternative (from OpenCV docs):\n\tsobel_x = np.array([\n\t\t[-1, 0, 1],\n\t\t[-2, 0, 2],\n\t\t[-1, 0, 1],\n\t])\n\n\timage_dx = conv2d(image, sobel_x)\n\timage_dy = conv2d(image, -sobel_x.T)\n\tassert np.all(np.isclose(image_dy, image_dy_1))\n\tassert np.all(np.isclose(image_dx, image_dx_1))\n\treturn image_dx, image_dy", "def wiener_deconvolution(img, otf, sn_power_ratio, snr_includes_otf=False):\n if snr_includes_otf:\n wfilter = otf.conj() / (np.abs(otf)**2 * (1 + 1 / sn_power_ratio))\n else:\n wfilter = otf.conj() / (np.abs(otf) ** 2 + 1 / sn_power_ratio)\n\n wfilter[np.isnan(wfilter)] = 0\n img_deconvolved = img * wfilter\n\n return img_deconvolved, wfilter", "def edge_magnitude(edge_x, edge_y):\n c = copy.deepcopy(edge_x)\n #print(len(c), len(c[0]))\n for i in range(len(edge_x)):\n for j in range(len(edge_x[0])):\n \n c[i][j] = ((edge_x[i][j]**2 + edge_y[i][j]**2)**0.5)\n \n #print(max([max(i) for i in c]))\n #print(min([min(i) for i in c]))\n c = normalize(c)\n return c\n # TODO: implement this function.\n #raise NotImplementedError\n #return edge_mag", "def calc_fft_mag(self, ch_id: int, func_id: int) -> None:\n self.write(':function{0}:fftmagnitude channel{1}'.format(func_id, ch_id))", "def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')", "def moffat_convolution(im_array,n_fwhm,beta,fwhm) :\n\n r_s = fwhm/(2. 
*math.sqrt(2.**(1./beta)-1.))\n\t\n im_kernel_array = gauss_kernel(n_fwhm,beta,r_s)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def deconvolve(num, den, n=None):\n num = np.atleast_1d(num)\n den = np.atleast_1d(den)\n N = len(num)\n D = len(den)\n if D > N and n is None:\n quot = []\n rem = num\n else:\n if n is None:\n n = N - D + 1\n input = np.zeros(n, float)\n input[0] = 1\n quot = signal.lfilter(num, den, input)\n num_approx = signal.convolve(den, quot, mode=\"full\")\n if len(num) < len(num_approx): # 1d only ?\n num = np.concatenate((num, np.zeros(len(num_approx) - len(num))))\n rem = num - num_approx\n return quot, rem", "def op(self, img):\n return self._mask * np.fft.fft2(img, norm=\"ortho\")", "def gauss_convolution_fft(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def compute_blendedness_aperture(img_central, img_others, radius):\n if isinstance(img_central, galsim.image.Image):\n ic = np.array(img_central.array.data)\n io = np.array(img_others.array.data)\n else :\n ic = img_central\n io = img_others\n h, w = ic.shape\n mask = plot.createCircularMask(h, w, center=None, radius=radius)\n flux_central = np.sum(ic*mask.astype(float))\n flux_others = np.sum(io*mask.astype(float))\n return flux_others / (flux_central+flux_others)", "def convolution(image, kernel):\n\n #Se encuentra la dimencion de la imagen\n if len(image.shape) == 3: #De 3 dimenciones\n print(\"Dimenciones de imagen: {}\".format(image.shape))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Se cambia a dos dimenciones\n print(\"Nuevas dimenciones: {}\".format(image.shape))\n else:\n print(\"Dimenciones de imagen: {}\".format(image.shape))\n\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n kernel_row, kernel_col = kernel.shape #asigna alto y ancho del filtro\n\n output_x = (image_col - (kernel_col / 2) * 2) + 1 #asigna el ancho del output\n output_y = (image_row - (kernel_row / 2) * 2) + 1 #asigna el alto del output\n \n output = np.zeros([int(output_y), int(output_x)]) #matriz donde se guarda el resultado\n\n padded_size = int((kernel_row - 1) / 2) #Tamaño de padding\n\n #Obtenemos la imagen con padding\n padded_image = padding(image,padded_size)\n \n for row in range(int(output_y)):\n for col in range(int(output_x)):\n output[row, col] = conv_helper(\n padded_image[row:row + kernel_row, \n col:col + kernel_col], kernel)\n \n # Se muestra la imagen en pantalla\n plt.imshow(output, cmap='gray')\n plt.title(\"Edge detection\")\n plt.show()\n\n return output", "def calculateFLOPs(self):\n\n print('\\nDilation Rate is: {}\\n'.format(self.dilation_rate))\n\n if self.dilation_rate is not 0:\n self.filter_shape = (self.filter_shape[0],self.filter_shape[1],self.filter_shape[2]+2*self.dilation_rate,self.filter_shape[3]+2*self.dilation_rate)\n\n ## Simple/Dilated Convolution Operation \n if self.filter_shape[1] == 0:\n num_operands = self.filter_shape[2] * self.filter_shape[3]\n else:\n num_operands = self.filter_shape[1] * self.filter_shape[2] * self.filter_shape[3]\n \n flops_per_instance_conv = 2*num_operands - 1 ## num_operands for multiplications and num_operands - 1 for additions\n \n num_instance_row = ((self.input_shape[2] - self.filter_shape[2] + 2*self.padding)/self.stride) + 1\n num_instance_column = ((self.input_shape[3] - self.filter_shape[3] + 
2*self.padding)/self.stride) + 1\n \n num_instance = num_instance_column * num_instance_row * self.filter_shape[0] ## multiply by number of filters\n \n total_flops_single_input_conv = num_instance * flops_per_instance_conv\n \n total_batch_flops_conv = total_flops_single_input_conv * self.input_shape[0]\n\n self.num_flops += total_batch_flops_conv\n \n if total_batch_flops_conv / 1e9 > 1: # for GFLOPs\n if self.dilation_rate is 0:\n print('Convolution: {} GFLOPs\\n'.format(total_batch_flops_conv / 1e9 ))\n else:\n print('Dilated Convolution: {} GFLOPs\\n'.format(total_batch_flops_conv / 1e9 ))\n else:\n if self.dilation_rate is 0:\n print('Convolution: {} MFLOPs\\n'.format(total_batch_flops_conv / 1e6 ))\n else:\n print('Dilated Convolution: {} GFLOPs\\n'.format(total_batch_flops_conv / 1e6 ))\n\n ## Pooling Operation\n total_flops_single_input_pool = num_instance\n total_batch_flops_pool = total_flops_single_input_pool * self.input_shape[0]\n\n self.num_flops += total_batch_flops_pool\n \n if total_batch_flops_pool / 1e9 > 1: # for GFLOPs\n print('Pooling: {} GFLOPs\\n'.format(total_batch_flops_pool / 1e9 ))\n else:\n print('Pooling: {} MFLOPs\\n'.format(total_batch_flops_pool / 1e6 ))\n \n ## BN Operation\n total_flops_mean_bn = num_instance * (self.input_shape[0] - 1 + 1) ## number of additions + 1 division\n total_flops_std_bn = num_instance * (2*self.input_shape[0] + self.input_shape[0] - 1 + 1) ## subtract mean and square operations + additions + division\n total_batch_flops_bn = (total_flops_mean_bn + total_flops_std_bn) * self.input_shape[0]\n\n self.num_flops += total_batch_flops_bn\n \n if total_batch_flops_bn / 1e9 > 1: # for GFLOPs\n print('Batch-Normalization: {} GFLOPs\\n'.format(total_batch_flops_bn / 1e9 ))\n else:\n print('Batch-Normalization: {} MFLOPs\\n'.format(total_batch_flops_bn / 1e6 ))\n \n ## RELU Activation\n num_flops_activation = num_instance_row * num_instance_column * self.filter_shape[0] * 2 ## For RELU activation, 1 comparison and 1 multiplication\n batch_flops_activation = num_flops_activation * self.input_shape[0]\n\n self.num_flops += batch_flops_activation\n\n if batch_flops_activation / 1e9 > 1: # for GFLOPs\n print('RELU: {} GFLOPs\\n'.format(batch_flops_activation / 1e9 ))\n else:\n print('RELU: {} MFLOPs\\n'.format(batch_flops_activation / 1e6 ))\n\n ## Total FLOPs\n if self.num_flops / 1e9 > 1: # for GFLOPs\n print('Total FLOPs: {} GFLOPs\\n'.format(self.num_flops / 1e9 ))\n else:\n print('Total FLOPs: {} MFLOPs\\n'.format(self.num_flops / 1e6 ))\n \n ## Percentage Contribution\n\n print('Operation \\t Percentage Contribution')\n print('------------------------------------------------')\n print('Convolution \\t {} %'.format(total_batch_flops_conv*100/self.num_flops))\n print('Pooling \\t {} %'.format(total_batch_flops_pool*100/self.num_flops))\n print('Batch-Normalization \\t {} %'.format(total_batch_flops_bn*100/self.num_flops))\n print('RELU \\t {} %'.format(batch_flops_activation*100/self.num_flops))", "def _irfft2d(f_x) :", "def calculate_magnitudes(data, frame_count, nb_channels):\n if nb_channels == 2: # Strip every other sample point to keep only one channel\n data = np.array(struct.unpack('{n}h'.format(n=nb_channels * frame_count), data))[::2]\n else:\n data = np.array(struct.unpack('{n}h'.format(n=nb_channels * frame_count), data))\n \n windowed_data = np.multiply(data, np.hanning(len(data)))\n \n # Calculate the Fourier Transform coefficients\n dft_array = cv2.dft(np.float32(windowed_data))\n\n # Return the power in each frequency\n 
magnitudes = np.add(np.sqrt((dft_array*dft_array).sum(axis=1)), 10)\n log_mag = np.log10(magnitudes)\n return log_mag", "def get_gradient(pixels,processing = 'normalize'):\n horgradient = ndimage.sobel(pixels, axis = 1)\n vergradient = ndimage.sobel(pixels, axis = 0)\n gradient = np.array((vergradient,horgradient))\n\n if processing == 'normalize':\n \"\"\"Normalizing the gradient\"\"\"\n gradnorm = 0.2*np.max(np.linalg.norm(gradient,axis = -1))\n gradient = gradient / gradnorm\n\n return gradient" ]
[ "0.7183386", "0.69026047", "0.6638305", "0.62914103", "0.6233699", "0.6149139", "0.60923374", "0.6032786", "0.59937316", "0.5830059", "0.5819746", "0.5730074", "0.571061", "0.56998205", "0.56732875", "0.56361973", "0.561787", "0.5611231", "0.5599725", "0.5590067", "0.55685765", "0.5556649", "0.5554998", "0.5550048", "0.5533901", "0.55296427", "0.55126196", "0.5512618", "0.5505247", "0.5500549" ]
0.77950674
0
This function calculates the magnitude of the derivative of an image using the Fourier transform
def fourier_der(im):
    ft_img = DFT2(im)
    ft_img = np.fft.fftshift(ft_img)
    n_x = im.shape[1]
    coeff_x = (2 * np.pi * 1j)/n_x
    u_freq = np.array([n if n < int(n_x/2) else (n-n_x) for n in range(n_x)]) * 1j
    u_freq = np.array([np.fft.fftshift(u_freq)]*im.shape[0]).transpose()
    dx_ft = coeff_x * IDFT2(np.fft.ifftshift(u_freq.transpose() * ft_img))
    m_y = im.shape[0]
    coeff_y = (2 * np.pi * 1j)/m_y
    v_freq = np.array([m if m < int(m_y/2) else (m-m_y) for m in range(m_y)]) * 1j
    v_freq = np.array([np.fft.fftshift(v_freq)] * im.shape[1]).transpose()
    tr = IDFT2(np.fft.ifftshift(v_freq * ft_img))
    dy_ft = coeff_y * tr
    magnitude = np.sqrt(np.abs(dx_ft)**2 + np.abs(dy_ft)**2)
    return magnitude.real.astype(np.float64)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fourier_der(im):\n im = im.astype(np.float64)\n # constants\n M, N = im.shape\n u = np.meshgrid(np.arange(N), np.arange(M))[0] - N//2\n v = np.meshgrid(np.arange(N), np.arange(M))[1] - M//2\n u_der, v_der = (2 * np.pi * 1j / N), (2 * np.pi * 1j / M)\n\n # calculate dx, dy\n dx = u_der * IDFT2(np.fft.fftshift(u) * DFT2(im))\n dy = v_der * IDFT2(np.fft.fftshift(v) * DFT2(im))\n\n return np.sqrt(np.abs(dx)**2 + np.abs(dy)**2) # = magnitude", "def fourierCV(img):\n\tgray = grayscale(img)\n\tdft = cv2.dft(np.float32(gray), flags = cv2.DFT_COMPLEX_OUTPUT)\n\tdft_shift = np.fft.fftshift(dft)\n\tmagnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))\n return magnitude_spectrum", "def conv_der(im):\n im = im.astype(np.float64)\n # set der x/y matrix\n der_x = np.array([[1, 0, -1]])\n der_y = np.array(der_x.transpose())\n # calculate the derivative to x and y\n dx = conv(im, der_x, mode='same')\n dy = conv(im, der_y, mode='same')\n\n return np.sqrt(np.abs(dx)**2 + np.abs(dy)**2) # = magnitude", "def fourierNP(img):\n\tgray = grayscale(img)\n\tf = np.fft.fft2(gray)\n\tfshift = np.fft.fftshift(f)\n\tmagnitude_spectrum = 20*np.log(np.abs(fshift)) # RETURN THIS\n return magnitude_spectrum", "def conv_der(im):\n derevitive_conv = np.array([[1], [-1]])\n dx = scipy.signal.convolve2d(im, derevitive_conv, 'same')\n dy = scipy.signal.convolve2d(im, derevitive_conv.transpose(), 'same')\n magnitude = np.sqrt(np.abs(dx)**2 + np.abs(dy)**2)\n\n return magnitude.real.astype(np.float64)", "def get_magnitude(frames, num_fft):\n complex_spec = np.fft.rfft(frames, num_fft)\n return np.absolute(complex_spec)", "def calc_fft_mag(self, ch_id: int, func_id: int) -> None:\n self.write(':function{0}:fftmagnitude channel{1}'.format(func_id, ch_id))", "def _irfft2d(f_x) :", "def compute_gradmag(image_arr):\n assert image_arr.ndim == 2\n dy = sobel(image_arr, axis=0)\n dx = sobel(image_arr, axis=1)\n return np.hypot(dx, dy)", "def get_fft_mag(self, func_id: int) -> float:\n return float(self.query(':measure:fft:magnitude? 
function{}'.format(func_id)))", "def DFT2(image):\n full_dft2 = DFT(DFT(image.transpose()).transpose())\n return full_dft2.astype(np.complex128)", "def calculate_magnitudes(data, frame_count, nb_channels):\n if nb_channels == 2: # Strip every other sample point to keep only one channel\n data = np.array(struct.unpack('{n}h'.format(n=nb_channels * frame_count), data))[::2]\n else:\n data = np.array(struct.unpack('{n}h'.format(n=nb_channels * frame_count), data))\n \n windowed_data = np.multiply(data, np.hanning(len(data)))\n \n # Calculate the Fourier Transform coefficients\n dft_array = cv2.dft(np.float32(windowed_data))\n\n # Return the power in each frequency\n magnitudes = np.add(np.sqrt((dft_array*dft_array).sum(axis=1)), 10)\n log_mag = np.log10(magnitudes)\n return log_mag", "def extract(image):\n # calculate fft\n spectrum = np.fft.fft2(image)\n fshift = np.fft.fftshift(spectrum) # to make the magnitude graph with the lower frequency in the middle\n\n # calculate phase and magnitude\n magnitude = np.abs(fshift)\n phase = np.angle(fshift)\n\n return magnitude, phase", "def calculate_mags(self):\n res = numpy.fft.rfft(self.cur_input)\n self.mags = []\n for num in res[1:]:\n real = float(numpy.real(num))\n imag = float(numpy.imag(num))\n mag = math.sqrt((real**2)+(imag**2))\n self.mags.append(mag)", "def DFT2(image):\n image = image.astype(np.float64)\n M, N = image.shape\n\n # build the dft2_matrix transform\n omega_y = np.exp(-2 * np.pi * 1j / M)\n u, v = np.meshgrid(np.arange(M), np.arange(M))\n dft2_matrix = np.power(omega_y, u*v)\n\n # calculate the 2D fourier transform\n fourier_image = np.dot(dft2_matrix, DFT(image))\n\n return fourier_image", "def fd(f, x, h, fl=np.complex_):\n return np.divide(f(np.add(x, h, dtype=fl)) - f(x), h, dtype=fl)", "def fft(self, *args, **kwargs):\n return _image.image_fft(self, *args, **kwargs)", "def calculate_mag(self, signal, db_conversion=True):\n signal = signal.view(-1, signal.shape[-1])\n stft = torch.stft(signal, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True, normalized=False, onesided=True, pad_mode='reflect')\n mag = (stft ** 2).sum(-1)\n if db_conversion:\n mag = torch.log10(mag + 1e-08)\n return mag", "def DFT(signal):\n n = signal.shape[0]\n omega = np.exp(((((-2) * np.pi)*1j) / n))\n\n e_items = np.vander(omega**np.arange(n), n, True)\n fourier_signal = np.dot(e_items, signal)\n\n return fourier_signal.astype(np.complex128)", "def magnitude_of_gradient(grad_f):\n return np.sqrt(np.ufunc.reduce(np.add, [x**2 for x in grad_f]))", "def fourier(img):\n return fourierCV(img)", "def magnitude(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n\n # Magnitude matrix:\n dft = np.zeros([x, y], float)\n\n for i in range(0, x):\n for j in range(0, y):\n dft[i, j] = np.sqrt(np.square(np.real(matrix[i, j])) + np.square(np.imag(matrix[i, j])))\n\n\n return dft", "def dst(y):\n N = len(y)\n y2 = empty(2*N,float)\n y2[0] = y2[N] = 0.0\n y2[1:N] = y[1:]\n y2[:N:-1] = -y[1:]\n a = -imag(rfft(y2))[:N]\n a[0] = 0.0\n\n return a", "def constract(phase, magnitude):\n new_spectrum = magnitude * np.exp(1j * phase)\n\n # reverse the shift and FFT\n f_ishift = np.fft.ifftshift(new_spectrum)\n img_back = np.fft.ifft2(f_ishift)\n \n return np.abs(img_back)", "def convDerivative(inImage: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\r\n kernel_x = np.array([[0, 0, 0], [1, 0, -1], [0, 0, 0]])\r\n kernel_y = np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]])\r\n\r\n # derivative by rows:\r\n Ix = cv2.filter2D(inImage, 
-1, kernel_x) # , borderType=cv2.BORDER_REPLICATE\r\n\r\n # derivative by columns:\r\n Iy = cv2.filter2D(inImage, -1, kernel_y)\r\n\r\n eps = 0.0000000001\r\n magnitude = pow(Ix ** 2 + Iy ** 2, 0.5)\r\n direction = np.arctan(Iy / (Ix + eps))\r\n\r\n return direction, magnitude, Ix, Iy", "def DFT(signal):\n signal = signal.astype(np.float64)\n # find the length of the signal\n N = signal.shape[0]\n if signal.ndim == 2:\n M, N = signal.shape\n\n # calculate DFT matrix\n u, v = np.meshgrid(np.arange(N), np.arange(N))\n omega = np.exp(-2 * np.pi * 1j / N)\n dft_matrix = np.power(omega, u*v)\n\n # if it is a matrix of signals\n if signal.ndim == 2:\n # calculate the Fourier Transform\n complex_fourier_signal = np.dot(dft_matrix, signal.transpose())\n return complex_fourier_signal.transpose()\n\n # calculate the Fourier Transform\n complex_fourier_signal = np.dot(dft_matrix, signal)\n return complex_fourier_signal", "def magnitude(X):\r\n r = np.real(X)\r\n i = np.imag(X)\r\n return np.sqrt(r * r + i * i);", "def backward_energy(im):\n\n xgrad = ndi.convolve1d(im, np.array([1, 0, -1]), axis=1, mode='wrap')\n ygrad = ndi.convolve1d(im, np.array([1, 0, -1]), axis=0, mode='wrap')\n\n grad_mag = np.sqrt(np.sum(xgrad ** 2, axis=2) + np.sum(ygrad ** 2, axis=2))\n\n return grad_mag", "def complex_derivative ( fun , z , h = 0 , I = 3 , err = False , real = True , imag = True ) :\n \n Z = complex ( z )\n \n X = Z.real\n Y = Z.imag\n\n ## few altenatives to calculate the real and imaginary part\n \n if real :\n UX = lambda x : complex ( fun ( complex ( x , Y ) ) ).real\n ## Real part \n re = derivative ( UX , X , h = h , I = I , err = err )\n else :\n VY = lambda y : complex ( fun ( complex ( X , y ) ) ).imag \n ## Real part \n re = derivative ( VY , Y , h = h , I = I , err = err )\n\n if imag : \n VX = lambda x : complex ( fun ( complex ( x , Y ) ) ).imag \n ## Imaginary part \n im = derivative ( VX , X , h = h , I = I , err = err )\n else :\n UY = lambda y : complex ( fun ( complex ( X , y ) ) ).real\n ## Imaginary part \n im = -derivative ( UY , Y , h = h , I = I , err = err )\n \n if not err : return complex ( re , im )\n \n result = complex ( re.value() , im.value() )\n error = ( re.cov2() + im.cov2() ) ** 0.5 \n \n return result , error", "def apply_fft(audio):\n\treturn numpy.abs(numpy.fft.rfft(audio))" ]
[ "0.7200767", "0.71910566", "0.6819447", "0.67383516", "0.6534676", "0.6498865", "0.6467338", "0.6463012", "0.6438436", "0.6408685", "0.6262128", "0.6196817", "0.6159641", "0.6158729", "0.61358994", "0.611732", "0.6114465", "0.6101021", "0.6028547", "0.60126936", "0.60116524", "0.5996832", "0.5966018", "0.59514576", "0.5948025", "0.5940677", "0.5883908", "0.5842617", "0.5820911", "0.57525593" ]
0.7349067
0
This is a helper method that calculates the correct approximation of the Gaussian kernel for a given size, using convolution and the binomial coefficients.
def gaus_kernel_calc(kernel_size):
    base_gaus_binom = np.array([[1], [1]])
    kernel = base_gaus_binom
    if kernel_size == 1:
        # If the kernel size is 1 we need a 2d array that keeps the image the same.
        kernel = np.array([[1]])
        kernel = scipy.signal.convolve2d(kernel, kernel.transpose())
        return kernel
    for i in range(kernel_size - 2):
        kernel = scipy.signal.convolve2d(kernel, base_gaus_binom)
    kernel = scipy.signal.convolve2d(kernel, kernel.transpose())
    return kernel/kernel.sum()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _gaussian_kernel(kernel_size):\n curr_kernel = _binoms(kernel_size)\n curr_kernel = curr_kernel.reshape(kernel_size, 1)\n kernel2d = convolve2d(curr_kernel.transpose(), curr_kernel)\n kernel2d = np.divide(kernel2d, np.sum(kernel2d))\n return kernel2d", "def _gaussian_kernel_1d(kernel_size):\n kernel = _binoms(kernel_size)\n return np.divide(kernel, np.sum(kernel))", "def gaussian_kernel(size, sigma):\n\n kernel = np.zeros((size, size))\n\n ### YOUR CODE HERE\n k = (size-1)/2\n factor = 1/(2*np.pi*sigma**2)\n for i in range(size):\n for j in range(size):\n exponent = -((i-k)**2 +(j-k)**2)/(2*sigma**2)\n kernel[i,j] = factor*np.exp(exponent)\n ### END YOUR CODE\n\n return kernel", "def gaussian_kernel(size, sigma):\n\n m, n = [(s - 1.) / 2. for s in size]\n y, x = np.ogrid[-m:m+1, -n:n+1]\n h = np.exp(-(x*x + y*y) / (2. * sigma * sigma))\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\n sumh = h.sum()\n if sumh != 0: h /= sumh\n return h", "def gaussian_kernel(size, sigma): \n \n kernel = np.zeros((size, size))\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n k = (size - 1) / 2\n sigma_sq = sigma ** 2\n pi_sigma = 1/(2 * np.pi * sigma_sq)\n for i in range(size):\n for j in range(size):\n kernel[i, j] = pi_sigma * np.exp(-0.5 * ((i-k)**2 + (j-k)**2) / (sigma_sq))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return kernel", "def gauss_kernels(size, sigma=1.0):\n if size < 3:\n size = 3\n\n m = size / 2\n x, y = np.mgrid[-m:m + 1, -m:m + 1]\n kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n kernel_sum = kernel.sum()\n\n if not sum == 0:\n kernel = kernel / kernel_sum\n\n return kernel", "def gaussian_kernel(kernel_size: (int, tuple, list), width: float):\n kernel_size = np.asarray(to_list(kernel_size, 2), np.float)\n half_ksize = (kernel_size - 1) / 2.0\n x, y = np.mgrid[-half_ksize[0]:half_ksize[0] + 1, -half_ksize[1]:half_ksize[1] + 1]\n kernel = np.exp(-(x ** 2 + y ** 2) / (2 * width ** 2))\n return kernel / (kernel.sum() + 1e-08)", "def gaussian_kernel(kernel_size: (int, tuple, list), width: float):\n\n kernel_size = np.asarray(to_list(kernel_size, 2), np.float)\n half_ksize = (kernel_size - 1) / 2.0\n x, y = np.mgrid[-half_ksize[0]:half_ksize[0] + 1,\n -half_ksize[1]:half_ksize[1] + 1]\n kernel = np.exp(-(x ** 2 + y ** 2) / (2 * width ** 2))\n return kernel / (kernel.sum() + 1e-8)", "def gaus_1d(kernel_size):\n gaus_kernel = np.array([1, 1])\n for i in range(kernel_size - 2):\n gaus_kernel = convolve(gaus_kernel, np.array([1, 1]), mode ='full')\n gaus_kernel = gaus_kernel.astype(np.float32)\n gaus_kernel /= np.sum(gaus_kernel)\n return gaus_kernel", "def makeGaussianKernel(sigma: float) -> np.ndarray:\n\n # Your code here.\n kernel_size = 8*sigma+1\n kernel = np.zeros([kernel_size,kernel_size], dtype=float)\n center = kernel_size//2\n \n \n s = 2*(sigma**2)\n sum_val = 0\n for i in range(0,kernel_size):\n for j in range(0,kernel_size):\n x = i-center\n y = j-center\n kernel[i,j] = np.exp(-(x**2+y**2) / s)\n sum_val += kernel[i,j]\n #/(np.pi * s)\n sum_val = 1/sum_val\n print(\"here is the kernel\", kernel*sum_val)\n return kernel*sum_val", "def gauss_kernel(size, size_y=None):\n size = int(size)\n if not size_y:\n size_y = size\n else:\n size_y = int(size_y)\n\n x, y = mgrid[-size: size + 1, -size_y: size_y + 1]\n\n g = exp(-(x ** 2 / float(size) + y ** 2 / float(size_y)))\n return g / g.sum()", "def gaussian2d(filter_size=5, sig=1.0):\n ax = 
np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))\n return kernel / np.sum(kernel)", "def gaussian_kernel(windowX, windowY, sigma):\n X,Y = createKernalWindowRanges(windowX, windowY, increment)\n \n gKernel = gaussianNormalised(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(gKernel))\n \n if gSum == 0:\n print \"Warning gaussian_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (gKernel)\n else:\n return (gKernel / gSum)", "def _generate_gaussian_kernel(self, size: int, sigma: float = 1.0, mu: float = 0.0) -> ndarray:\n # create the 1D array of equally spaced distance point of given size\n self.kernel_1d = np.linspace(-(size//2), size//2, size)\n # get the gaussian distribution of the 1D array\n self.kernel_1d = self._gaussian_distribution(\n self.kernel_1d, mu, sigma)\n\n # Compute the outer product of kernel1D tranpose and kernel1D\n self.kernel_2d = np.outer(self.kernel_1d.T, self.kernel_1d)\n # normalize the the outer product to suish the values between 0.0-1.0\n self.kernel_2d *= 1.0/self.kernel_2d.max()\n return self.kernel_2d", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()", "def gaussian_1xDerivative_kernel(windowX, windowY, sigma):\n # See [http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/low/edges/canny.htm]\n X, Y = createKernalWindowRanges(windowX, windowY, increment)\n \n g_dx_kernel = gaussianFirstDerivative(X, 0, sigma) * gaussianNormalised(Y, 0, sigma)\n gSum = np.sum(np.abs(g_dx_kernel))\n \n if gSum == 0:\n print \"Warning dx_g_kernel:: Not normalising by sum of values, as sum = \" + str(gSum)\n return (g_dx_kernel)\n else:\n return (g_dx_kernel / gSum)", "def gaussian_kernel(shape: Tuple[int, int]=(3, 3), sigma: float=0.5):\n m, n = [int((ss - 1.) / 2.) for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n kernel = np.exp(-(x * x + y * y) / (2. 
* sigma * sigma))\n kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0\n sumh = kernel.sum()\n if sumh != 0:\n kernel /= sumh\n return kernel", "def gaussianKernel(size, sigma=1):\n\n colourers.info(f'Creating gaussian kernel of size {size} with sigma of {sigma}')\n size = int(size) // 2\n x, y = np.mgrid[-size:size+1, -size:size+1]\n normal = 1 / (2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0 * sigma ** 2))) * normal\n return g", "def isotropic_Gaussian(ksize=15, l=6):\n\n V = np.array([[1, 0], [0, -1]])\n D = np.array([[l, 0], [0, l]])\n Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))\n k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)\n\n return k", "def apply_gaussian_resolution(self,params,data,fwhm=1,dE=0.01,E_max=100):\n print('\\n################### CONVOLUTION #####################\\n')\n print(f'\\n\\tConvolution with Gaussian function, FWHM = {fwhm} meV\\n')\n\n data.fwhm = fwhm\n c = fwhm/2.35482\n\n data.dE = dE\n data.E_max = E_max\n data.spectra_E = np.arange(0,data.E_max+data.dE,data.dE)\n data.spectra_num_E = len(data.spectra_E)\n data.spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n data.smooth_spectra = np.zeros((data.spectra_num_E,params.num_Qpoints))\n structure_factors = []\n energies = []\n\n ### sum intensity of degenerate bands\n if params.sum_degenerate_bands == True:\n print('\\n\\tSumming degenerate bands before convolution (using convolution dE as tolerance)\\n')\n for q in range(params.num_Qpoints):\n sfac = data.structure_factors[:,q]\n energy = data.frequencies[f'{q}']\n reduced_energies = []\n summed_sfac = []\n while True:\n if len(energy) == 0:\n break\n test_energy = energy[0]\n reduced_energies.append(test_energy)\n indicies = np.intersect1d(np.argwhere(energy <= (test_energy+data.dE)),\n np.argwhere(energy > (test_energy-data.dE)))\n summed_sfac.append(sfac[indicies].sum())\n sfac = np.delete(sfac,indicies)\n energy = np.delete(energy,indicies)\n energies.append(reduced_energies)\n structure_factors.append(summed_sfac)\n else:\n print('\\n\\tWARNING: You should definitely sum degenerate bands!!!\\n')\n for q in range(params.num_Qpoints):\n energies.append(data.frequencies[f'{q}'])\n structure_factors.append(data.structure_factors[:,q])\n\n ### populate array for heatmap\n ### try statement takes care of negative energies\n for q in range(params.num_Qpoints):\n for b in range(len(structure_factors[q][:])):\n try: # if there are negative modes, argwhere returns an empty vector and the slice crashes\n data.spectra[np.argwhere(data.spectra_E <= \n energies[q][b]).max(),q] = structure_factors[q][b]\n except:\n continue\n\n if params.bose_factor == True:\n print('\\n\\tWARNING: Bose factor isnt verified. 
Need to compare to SNAXS.\\n')\n if params.temperature < 5:\n temperature = 5\n else:\n temperature = params.temperature\n inds = np.argwhere(data.spectra_E <= 0.5)\n tmp_e = np.copy(data.spectra_E)\n tmp_e[inds] = 0.5\n bose = 1+1/(np.exp(tmp_e/(constants.kb*1000*temperature))-1)\n bose = np.tile(bose.reshape((data.spectra_num_E,1)),reps=(1,params.num_Qpoints))\n data.spectra = np.multiply(data.spectra,bose)\n data.spectra = data.spectra/np.max(data.spectra)\n\n ### gaussian convolution using for loops, slow but very little memory utilization\n g_energy = np.append(data.spectra_E-data.spectra_E.max(),data.spectra_E[1:])\n gaussian = np.exp(-0.5*g_energy**2/c**2)/c/np.sqrt(2*np.pi)\n gaussian = np.tile(gaussian.reshape((gaussian.shape[0],1)),(1,data.num_Qpoints))\n tmp = np.append(data.spectra,data.spectra,axis=0)[1:,:]\n for e in range(data.spectra_num_E):\n if e%50 == 0:\n print(f'\\t------ {e}/{data.spectra_num_E} -------')\n data.smooth_spectra[e,:] = np.trapz(tmp*np.roll(gaussian,shift=e,axis=0),g_energy,axis=0)\n print('\\n\\tDone convolving!\\n')\n data.smooth_spectra = data.smooth_spectra/np.max(data.smooth_spectra)\n\n# if params.random_background == True:\n# data.smooth_spectra = data.smooth_spectra+(np.random.normal(0,1,\n# (data.smooth_spectra.shape[0],data.smooth_spectra.shape[1])))*0.001\n \n plt.imshow(data.smooth_spectra,origin='lower',aspect='auto',cmap='hot')\n plt.show()", "def Gaussiankernel(size, sigma=1): \n size = int(size) // 2\n # create x grid and y grid\n x, y = np.mgrid[-size:size+1, -size:size+1] \n # gaussian distribution formula\n normal = 1 / np.sqrt(2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n \n return g/g.sum()", "def generate_gaussian_kernel(shape=(3,3),sigma=0.8):\n m,n = [(ss-1.)/2. 
for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n return h", "def _binoms(kernel_size):\n if kernel_size > 1:\n curr_kernel = BASE_KERNEL\n for i in range(2, kernel_size):\n curr_kernel = np.convolve(curr_kernel, BASE_KERNEL)\n return curr_kernel\n return np.array([1])", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def get_kernel(kernel_size, blur=1 / 20, halo=.001):\n\n # generate x and y grids\n x, y = np.mgrid[0:kernel_size * 2 + 1, 0:kernel_size * 2 + 1]\n\n center = kernel_size + 1 # center pixel\n r = np.sqrt((x - center) ** 2 + (y - center) ** 2) # distance from center\n\n # now compute the kernel. This function is a bit arbitrary.\n # adjust this to get the effect you want.\n kernel = np.exp(-r / kernel_size / blur) + (1 - r / r[center, 0]).clip(0) * halo\n return kernel", "def edge_kernel(isotropic):\n if isotropic:\n edge_kernel = - 1.0 * np.ones([3, 3, 3], np.float64)\n edge_kernel[1, 1, 1] = 26.0\n else:\n edge_kernel = - 1.0 * np.ones([1, 3, 3], np.float64)\n edge_kernel[0, 1, 1] = 8\n return edge_kernel", "def GaussianKernel(shape=(3, 3), sigma=0.5):\r\n radius_x, radius_y = [(radius-1.)/2. 
for radius in shape]\r\n y_range, x_range = np.ogrid[-radius_y:radius_y+1, -radius_x:radius_x+1]\r\n h = np.exp(- (x_range*x_range + y_range*y_range) / (2.*sigma*sigma))\r\n h[h < np.finfo(h.dtype).eps*h.max()] = 0\r\n sumofh = h.sum()\r\n if sumofh != 0:\r\n h /= sumofh\r\n return h", "def GaussianKernel(radius, std):\n size = 2 * radius + 1\n weight = torch.ones(size, size)\n weight.requires_grad = False\n for i in range(-radius, radius+1):\n for j in range(-radius, radius+1):\n dis = (i * i) + (j * j)\n weight[i+radius][j+radius] = np.exp(-dis / (2 * std * std))\n weight = weight / weight.sum()\n return weight", "def gauss_convolution(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n conv_image = signal.convolve(im_array,im_kernel_array,mode = 'same')\n\n return (conv_image)", "def MVgaussian(size,mu1=0,mu2=0, sigma1=3,sigma2 = 1):\n kernel = np.zeros((size, size), dtype=np.float32)\n \n size = int(size) // 2\n X = np.arange(-size,size+1)\n Y = np.arange(-size,size+1)\n \n for x in X:\n for y in Y:\n Gx = np.exp(-((x-mu1)**2)/(2*(sigma1**2)))\n Gy = np.exp(-((y-mu2)**2)/(2*(sigma2**2)))\n Gx = math.exp(-(math.pow(x-mu1,2))/(2*math.pow(sigma1,2)))\n Gy = math.exp(-(math.pow(y-mu2,2))/(2*math.pow(sigma2,2)))\n kernel[x+size,y+size] = Gx*Gy\n return kernel" ]
[ "0.7698725", "0.7291328", "0.7062091", "0.70119226", "0.70004064", "0.6961366", "0.6934092", "0.69159126", "0.68459934", "0.67787015", "0.6736879", "0.672049", "0.6705494", "0.66477126", "0.6604094", "0.66021335", "0.65580577", "0.6506256", "0.6482842", "0.6423013", "0.63718224", "0.63600194", "0.6260924", "0.6260374", "0.6247022", "0.62363905", "0.62361294", "0.6218746", "0.62042636", "0.62014854" ]
0.7816964
0
This function creates a blur filter with a Gaussian matrix, using the Fourier transform.
def blur_fourier(im, kernel_size):
    kernel = gaus_kernel_calc(kernel_size)
    zeros = np.zeros(im.shape)
    x_mid = np.math.floor(im.shape[1] / 2)
    y_mid = np.math.floor(im.shape[0] / 2)
    distance = np.math.floor(kernel_size / 2)
    zeros[x_mid - distance: x_mid + distance + 1, y_mid - distance: y_mid + distance + 1] = kernel
    fourier_kernel = DFT2(np.fft.ifftshift(zeros))
    fourier_img = DFT2(im)
    fourier_blured = fourier_kernel * fourier_img
    return IDFT2(fourier_blured).real.astype(np.float64)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def blur_fourier(im, kernel_size):\n im = im.astype(np.float64)\n # build the kernel with zero padding\n kernel_base = gaussian_kernel_factory(kernel_size)\n window = np.zeros_like(im).astype(np.float64)\n M, N = im.shape\n dx, dy = kernel_base.shape\n x_middle, y_middle = N//2, M//2\n\n window[(y_middle-dy//2):(y_middle+dy//2+1), (x_middle-dx//2):(x_middle+dx//2+1)] = kernel_base\n\n # multiply in the freq domain\n return IDFT2(DFT2(im) * DFT2(np.fft.ifftshift(window))).real", "def fmgf(array, sigma):\n x, y = np.arange(len(array)), array.copy()\n yg = ndimage.filters.gaussian_filter(y, sigma)\n y -= yg\n\n # digitizing\n m = 101\n dy = 6.0 * mad(y) / m\n ybin = np.arange(np.min(y) - 5 * dy, np.max(y) + 5 * dy + dy, dy)\n z = np.zeros([len(ybin), len(x)])\n z[np.digitize(y, ybin), x] = 1.0\n\n # filtering\n g = partial(ndimage.filters.gaussian_filter, sigma=(0, sigma))\n c = partial(ndimage.filters.convolve1d, weights=np.ones(m), axis=0)\n zf = c(c(c(g(z))))\n\n # estimates\n ym1, y0, yp1 = [ybin[np.argmax(zf, 0) + i] for i in (-1, 0, 1)]\n zm1, z0, zp1 = [zf[np.argmax(zf, 0) + i, x] for i in (-1, 0, 1)]\n t = (zm1 - z0) / (zm1 - 2 * z0 + zp1)\n\n filtered = yg + ((1 - t) ** 2) * ym1 + (2 * t * (1 - t)) * y0 + (t**2) * yp1\n return filtered", "def blur2D(image2D,sigmaFrac=10):\n #NOTE: sigma may not be number of pixels\n ftimage = np.fft.fftshift(np.fft.fft2(image2D))\n ncols, nrows = image2D.shape\n cy, cx = nrows/2, ncols/2\n sigmax,sigmay=ncols/sigmaFrac,nrows/sigmaFrac\n x = np.linspace(0, nrows, nrows)\n y = np.linspace(0, ncols, ncols)\n X, Y = np.meshgrid(x, y)\n gmask = np.exp(-(((X-cx)/sigmax)**2 + ((Y-cy)/sigmay)**2))\n return np.abs(np.fft.ifft2(ftimage * gmask))", "def gaussian_blur(self,img):\n return cv2.GaussianBlur(img, (self.kernel_size, self.kernel_size), 0)", "def gs_blur(self,k,img):\n SIG = self.sigma\n sig = [SIG,k*SIG,k*k*SIG,k*k*k*SIG,k*k*k*k*SIG]\n gsArray = [0,1,2,3,4]\n scaleImages = [0,1,2,3,4]\n \n for i in range(5):\n gsArray[i] = scipy.ndimage.filters.gaussian_filter(img,sig[i])\n\n return gsArray", "def gaussianblur_transform(im):\n im_gblur = cv2.GaussianBlur(im,(5,5),0)\n return im_gblur", "def gaussian_blur(img: np.ndarray, kernel_size: int = 3):\n imgtype = img.dtype\n h,w,c = img.shape\n\n #Get a valid kernel size\n kernel_size = valid_kernel(h,w,kernel_size)\n \n #Gaussian Filter Blur\n blurred = cv2.GaussianBlur(img,(kernel_size,kernel_size),0)\n\n return blurred.astype(imgtype)", "def __gaussian_blur(self, img, kernel_size=3):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussianBlur(img,ksize=(5,5),sigma=10):\n #kernel = cv2.getGaussianKernel(ksize,sigma)\n dst = np.zeros_like(img)\n cv2.GaussianBlur(src=img,dst=dst,ksize=ksize,sigmaX=0)\n return dst", "def blurImage1(in_image: np.ndarray, kernel_size: np.ndarray) -> np.ndarray:\r\n size = kernel_size[0]\r\n sigma = 1\r\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\r\n normal = 1 / (2.0 * np.pi * sigma ** 2)\r\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2))) * normal\r\n in_image = cv2.filter2D(in_image, -1, g)\r\n return in_image", "def test_gaussian_filter():\n\n def rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n\n return gray\n\n img = rgb2gray(np.array(Image.open('data/graf.png')))\n gx, x = gauss_module.gauss(4)\n gx = gx.reshape(1, gx.shape[0])\n gy = gx.reshape(gx.shape[1], gx.shape[0])\n smooth_img = conv2(img, gx * np.array(gy))\n\n test_smooth_img = gauss_module.gaussianfilter(img, 
4)\n\n assert np.all(smooth_img.round(5) == test_smooth_img.round(5))", "def gaussianBlurring(frame):\n return cv2.GaussianBlur(frame, ksize =(11, 11), sigmaX = 0)", "def Gauss_filter(data, sigma=(0,2,2), mode='wrap'): \n import scipy.ndimage.filters as flt\n return flt.gaussian_filter(data, sigma=sigma, mode=mode)", "def gauss_convolution_fft(im_array, n_fwhm, fwhm) :\n \n sigma = fwhm / (2.*math.sqrt(2.*math.log(2.)))\n\t\n im_kernel_array = gauss_kernel(n_fwhm, sigma)\n fftconv_image = signal.fftconvolve(im_array,im_kernel_array,mode = 'same')\n\n return (fftconv_image)", "def fake_gaussian(img, vertical_horizontal_sigma, iter=3):\n sigma_vertical, sigma_horizontal = vertical_horizontal_sigma\n h_blured = box_filter1d(img, sigma_horizontal, horizontal=True, iter=iter)\n blured = box_filter1d(h_blured, sigma_vertical, horizontal=False, iter=iter)\n return blured", "def reblur(inp_img):\n\n img = np.array(inp_img)\n kernel_deviation = 1.5\n y_img = cv2.GaussianBlur(img, (11, 11), kernel_deviation)\n\n return y_img", "def blurImage2(in_image: np.ndarray, kernel_size: np.ndarray) -> np.ndarray:\r\n gaussian_kernel = cv2.getGaussianKernel(kernel_size[0], sigma=0)\r\n out_img = cv2.filter2D(in_image, -1, gaussian_kernel)\r\n return out_img", "def gaussian_blur(device, img, ksize, sigmax=0, sigmay=None, debug=None):\n\n img_gblur = cv2.GaussianBlur(img, ksize, sigmax, sigmay)\n\n device += 1\n if debug == 'print':\n print_image(img_gblur, (str(device) + '_gaussian_blur.png'))\n elif debug == 'plot':\n if len(img_gblur) == 3:\n plot_image(img_gblur)\n else:\n plot_image(img_gblur, cmap='gray')\n\n return device, img_gblur", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)", "def gaussian_blur(img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)" ]
[ "0.7033389", "0.6549185", "0.6358396", "0.6321791", "0.6246919", "0.6219117", "0.62025046", "0.61897266", "0.6176243", "0.61696154", "0.61396426", "0.61196125", "0.61132675", "0.61000806", "0.60520715", "0.6044603", "0.60240215", "0.6006676", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492", "0.5973492" ]
0.70527494
0
Masks the genotype call if it is not in a native segment. It does this by determining whether the position falls between the start and end intervals for that individual (the bed files give NAT_NAT regions).
def ind_pos(position, ind, current_geno, chr_starts, chr_ends):
    ind_starts = chr_starts[ind]
    ind_ends = chr_ends[ind]
    #print [position, ind, current_geno, ind_starts, ind_ends]
    in_interval = False
    for interval in range(len(ind_starts)):
        if position > int(ind_starts[interval]) and position < int(ind_ends[interval]):
            in_interval = True
            break
    if in_interval:
        return(current_geno)
    else:
        return("./.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tnuc_region_in_intron(np, beg, end):\n\n if beg.tpos == 0 or end.tpos == 0: return False\n if beg.pos == end.pos and beg.tpos*end.tpos > 0:\n return True\n if beg.pos+1 == end.pos and beg.tpos>0 and end.tpos<0:\n return True\n if end.pos+1 == beg.pos and beg.tpos<0 and end.tpos>0:\n return True\n\n return False", "def tnuc_region_in_exon(np, beg, end):\n\n if beg.tpos != 0: return False\n if end.tpos != 0: return False\n for i in range(beg.pos, end.pos-1):\n if abs(np[i] - np[i+1]) != 1:\n return False\n return True", "def isolate_range(start_addr, end_addr):\n\n split_classification(start_addr)\n split_classification(end_addr)", "def test_bad_region():\n ref_file = pkg_resources.resource_filename('m260b.test_data', 'ref_practice_W_1_chr_1.fasta')\n read_file = pkg_resources.resource_filename('m260b.test_data', 'practice_w_1.std.bad_region1.bam')\n ref_hdr, reference = read_basic_fasta(ref_file) \n read_iter = pysam.Samfile(read_file)\n chr = ref_hdr[1:].strip()\n areg = list(active_regions(read_iter, reference, chr, start_offset=0, flank=30, dfrac=1.0))\n found = False\n for region, reads in areg:\n found |= region.start <= 5769 <= region.stop\n if not found:\n raise ValueError('Window did not open around variant')", "def covers_overlaps(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.covers_overlaps(bounds)", "def is_ann_limits(limitfile):\n tokens = os.path.splitext(os.path.basename(limitfile))[0].split('_')\n if tokens[3] in ['point', 'map', 'radial']:\n return True\n return tokens[2] in ['point', 'map', 'radial']", "def region_gene_overlap(\n region_pr,\n gene_bed,\n up=100_000,\n down=100_000,\n):\n genes = pr.read_bed(gene_bed)\n # Convert to DataFrame & we don't need intron/exon information\n genes = genes.as_df().iloc[:, :6]\n\n # Get the TSS only\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] = genes.loc[\n genes[\"Strand\"] == \"+\", \"Start\"\n ]\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] = genes.loc[\n genes[\"Strand\"] == \"-\", \"End\"\n ]\n\n # Extend up and down\n genes.loc[genes[\"Strand\"] == \"+\", \"Start\"] -= up\n genes.loc[genes[\"Strand\"] == \"+\", \"End\"] += down\n genes.loc[genes[\"Strand\"] == \"-\", \"Start\"] -= down\n genes.loc[genes[\"Strand\"] == \"-\", \"End\"] += up\n\n # Perform the overlap\n genes = pr.PyRanges(genes)\n genes = genes.join(region_pr).as_df()\n\n return genes", "def nondetects(self, masked=False):\r\n grd = self.grd\r\n xnd = []\r\n ynd = []\r\n ncells = len(grd.cells['depth'])\r\n non_detects_i_tr = np.zeros(ncells, np.int32)\r\n if masked:\r\n not_flagged = np.where(self.rec_track.flagged==0)[0]\r\n rec_track = self.rec_track[not_flagged]\r\n rec_seg = self.make_segments(set_depth=True, \r\n input_rec_track=rec_track)\r\n else:\r\n rec_seg = self.rec_seg\r\n for nr, rseg in enumerate(rec_seg):\r\n seg = rec_seg[nr]\r\n dt = seg.dt\r\n if dt > dt_signal+1:\r\n t1 = seg.t1\r\n t2 = seg.t2\r\n nint = int(np.rint((t2-t1)/dt_signal)) - 1\r\n x1 = seg.x1\r\n x2 = seg.x2\r\n y1 = seg.y1\r\n y2 = seg.y2\r\n dx_nd = (x2 - x1)/float(nint+1)\r\n dy_nd = (y2 - y1)/float(nint+1)\r\n if nint < 120: # 10 minute cutoff for nondetect filling\r\n xint = [x1 + n*dx_nd for n in range(1,nint)]\r\n yint = [y1 + n*dy_nd for n in range(1,nint)]\r\n xnd = xnd + xint\r\n ynd = ynd + yint\r\n\r\n for nd in range(len(xnd)):\r\n xy = [xnd[nd], ynd[nd]]\r\n i = grd.select_cells_nearest(xy)\r\n if (i is not None) and (i >= 0):\r\n non_detects_i_tr[i] += 1\r\n\r\n return non_detects_i_tr", "def bounds(self, 
pos):", "def encode_segmap(self, mask):\n for voidc in self.void_labels:\n mask[mask == voidc] = self.ignore_index\n for validc in self.valid_labels:\n mask[mask == validc] = self.class_map[validc]\n # remove extra idxs from updated dataset\n mask[mask > 33] = self.ignore_index\n return mask", "def annotate_split_range(self, address, size, memtype, description):\n _, _ = self.split_range_at(address) # do not keep return values, following call may delete it from self.ranges\n end, _ = self.split_range_at(address+size)\n begin = self.get_range(address)\n\n begin.settype(memtype, description)\n if end != begin:\n print(\"Annotating '%s' over 2 separate ranges: %s and %s\" % (description, str(begin), str(end)))\n # TODO: merge the two\n end.settype(memtype, description)", "def detect_badsegments(\n raw,\n picks,\n segment_len=1000,\n significance_level=0.05,\n metric='std',\n ref_meg='auto',\n mode=None,\n detect_zeros=True,\n):\n\n gesd_args = {'alpha': significance_level}\n\n if (picks == \"mag\") or (picks == \"grad\"):\n chinds = mne.pick_types(raw.info, meg=picks, ref_meg=ref_meg, exclude='bads')\n elif picks == \"meg\":\n chinds = mne.pick_types(raw.info, meg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eeg\":\n chinds = mne.pick_types(raw.info, eeg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"eog\":\n chinds = mne.pick_types(raw.info, eog=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"ecg\":\n chinds = mne.pick_types(raw.info, ecg=True, ref_meg=ref_meg, exclude='bads')\n elif picks == \"emg\":\n chinds = mne.pick_types(raw.info, emg=True, ref_meg=ref_meg, exclude='bads')\n else:\n raise NotImplementedError(f\"picks={picks} not available.\")\n\n if mode is None:\n if detect_zeros:\n bdinds_maxfilt = detect_maxfilt_zeros(raw)\n else:\n bdinds_maxfilt = None\n XX, XX_times = raw.get_data(picks=chinds, reject_by_annotation='omit', return_times=True)\n elif mode == \"diff\":\n bdinds_maxfilt = None\n XX, XX_times = raw.get_data(picks=chinds, reject_by_annotation='omit', return_times=True)\n XX = np.diff(XX, axis=1)\n XX_times = XX_times[1:] # remove the first time point\n\n allowed_metrics = [\"std\", \"var\", \"kurtosis\"]\n if metric not in allowed_metrics:\n raise ValueError(f\"metric {metric} unknown.\")\n if metric == \"std\":\n metric_func = np.std\n elif metric == \"var\":\n metric_func = np.var\n else:\n def kurtosis(inputs):\n return stats.kurtosis(inputs, axis=None)\n metric_func = kurtosis\n \n bdinds = sails.utils.detect_artefacts(\n XX,\n axis=1,\n reject_mode=\"segments\",\n metric_func=metric_func,\n segment_len=segment_len,\n ret_mode=\"bad_inds\",\n gesd_args=gesd_args,\n )\n\n for count, bdinds in enumerate([bdinds, bdinds_maxfilt]):\n if bdinds is None:\n continue\n if count==1:\n descp1 = count * 'maxfilter_' # when count==0, should be ''\n descp2 = ' (maxfilter)'\n else:\n descp1 = ''\n descp2 = ''\n onsets = np.where(np.diff(bdinds.astype(float)) == 1)[0]\n\n if bdinds[0]:\n onsets = np.r_[0, onsets]\n offsets = np.where(np.diff(bdinds.astype(float)) == -1)[0]\n\n if bdinds[-1]:\n offsets = np.r_[offsets, len(bdinds) - 1]\n assert len(onsets) == len(offsets)\n descriptions = np.repeat(\"{0}bad_segment_{1}\".format(descp1, picks), len(onsets))\n logger.info(\"Found {0} bad segments\".format(len(onsets)))\n\n onsets_secs = raw.first_samp/raw.info[\"sfreq\"] + XX_times[onsets.astype(int)]\n offsets_secs = raw.first_samp/raw.info[\"sfreq\"] + XX_times[offsets.astype(int)]\n durations_secs = offsets_secs - onsets_secs\n\n 
raw.annotations.append(onsets_secs, durations_secs, descriptions)\n\n mod_dur = durations_secs.sum()\n full_dur = raw.n_times / raw.info[\"sfreq\"]\n pc = (mod_dur / full_dur) * 100\n s = \"Modality {0}{1} - {2:02f}/{3} seconds rejected ({4:02f}%)\"\n logger.info(s.format(\"picks\", descp2, mod_dur, full_dur, pc))\n\n return raw", "def testOffsetsOutOfBoundsDetection(self):\n sim = Simulation()\n sim.set_simulation_parameters(\n seed=11,\n task=36,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=1,\n sample_size=0.01,\n max_time=100,\n )\n sim.set_map_files(sample_file=\"null\", fine_file=\"sample/SA_sample.tif\")\n sim.optimise_ram(ram_limit=10000)\n self.assertEqual(sim.fine_map.x_size, sim.sample_map.x_size)\n self.assertEqual(sim.fine_map.y_size, sim.sample_map.y_size)\n self.assertEqual(0, sim.sample_map.x_offset)\n self.assertEqual(0, sim.sample_map.y_offset)\n self.assertEqual(sim.fine_map.x_size, sim.grid.x_size)\n self.assertEqual(sim.fine_map.y_size, sim.grid.y_size)\n self.assertEqual(\"null\", sim.grid.file_name)\n sim.run()", "def regionstomask(in_regions, genome_len):\n out_mask = np.zeros((2,genome_len)).astype(bool)\n for region in in_regions:\n out_mask[region[0],region[1]:region[2]+1] = True\n return out_mask", "def is_in_map(self, x_ind, y_ind):\n return not (x_ind < self.origin[0] or\n x_ind > self.origin[0] + self.n * self.resolution or\n y_ind < self.origin[1] or\n y_ind > self.origin[1] + self.n * self.resolution)", "def get_regions_mask(self, input):", "def check_exon_boundary(self, pos):\n\n self.ensure_position_array()\n if pos.tpos > 0:\n if pos.pos == len(self.np) or pos.pos < 0:\n return\n x = self._tnuc2gnuc(pos.pos)\n y = self._tnuc2gnuc(pos.pos+1)\n if abs(x-y) == 1: # continuous genomic coordinates for continuous cDNA coordinates\n raise IncompatibleTranscriptError('exon_boundary_violation_cDNA_[%d_%d]_gDNA_[%d_%d]' % (pos.pos,pos.pos+1,x,y))\n elif pos.tpos < 0:\n if pos.pos == 1:\n return\n x = self._tnuc2gnuc(pos.pos-1)\n y = self._tnuc2gnuc(pos.pos)\n if abs(x-y) == 1: # continuous genomic coordinates for continuous cDNA coordinates\n raise IncompatibleTranscriptError('exon_boundary_violation_cDNA_[%d_%d]_gDNA_[%d_%d]' % (pos.pos-1,pos.pos,x,y))", "def _does_token_overlap_with_annotation(\n token: Token, annot_start: int, annot_end: int\n) -> bool:\n\n return (\n annot_start <= token.idx <= annot_end\n or token.idx <= annot_start <= token.idx + len(token)\n )", "def extract_fasta_region(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,variants=\"N\",missing_char=\"N\"):\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tif inds==\"all\" or inds==[\"all\"]:inds=input_vcf.samples# transform \"all\" in a list of all individuals in the vcf\n\tif type(inds) == str: inds=[inds]\n\tdict_seq={}#dictionnary to stock diploid seq\n\tif variants==\"DIP\":\n\t\tfor ind in inds:\n\t\t\tdict_seq[ind] = [\"\",\"\"]\n\telse:\n\t\tfor ind in inds:\n\t\t\tdict_seq[ind] = \"\"\n\tif not all(ind in input_vcf.samples for ind in inds): raise Exception(\"not all the individuals in\",inds, \" are found in the vcf samples:\",input_vcf.samples) \n\t#Function\n\t###identify individual to remove when calculating stats\n\tinds_to_delete=[]\n\tfor i,ind in enumerate(input_vcf.samples):#check which ind is in sample and compare it to our list of inds\n\t\t if ind not in inds:#delete this ind\n\t\t \tinds_to_delete.append(i)\n\t#go along 
the region\n\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t \tfor index in sorted(inds_to_delete)[::-1]:#remove the individuals we do not want\n\t \t\tdel record.samples[index]\n\t \tif \"DP\" in record.FORMAT:\n\t\t\tfor sample in record.samples:\n\t\t\t\tif mincov<sample[\"DP\"]<maxcov and sample.called==True:\n\t\t\t\t\tif variants==\"DIP\":\n\t\t\t\t\t\tdict_seq[sample.sample][0]+=sample.gt_bases.split(\"/\")[0]\n\t\t\t\t\t\tdict_seq[sample.sample][1]+=sample.gt_bases.split(\"/\")[1]\n\t\t\t\t\telif variants==\"RAN\":#randomly pick allele one or two every time\n\t\t\t\t\t\tdict_seq[sample.sample]+=sample.gt_bases.split(\"/\")[random.choice([0,1])]\n\t\t\t\t\telse :\n\t\t\t\t\t\tif sample.gt_bases.split(\"/\")[0]!=sample.gt_bases.split(\"/\")[1]: # If the two alleles are different add the character specify in \"variants\" \n\t\t\t\t\t\t\tdict_seq[sample.sample]+=variants\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdict_seq[sample.sample]+=sample.gt_bases.split(\"/\")[0]\n\t\t\t\telse:\n\t\t\t\t\tif variants==\"DIP\":\n\t\t\t\t\t\tdict_seq[sample.sample][0]+=missing_char\n\t\t\t\t\t\tdict_seq[sample.sample][1]+=missing_char\n\t\t\t\t\telse:\n\t\t\t\t\t\t\tdict_seq[sample.sample]+=missing_char\n\t\telse:\n\t\t\tif variants==\"DIP\":\n\t\t\t\tfor key in dict_seq.keys():\n\t\t\t\t\tdict_seq[key][0]+=missing_char\n\t\t\t\t\tdict_seq[key][1]+=missing_char\n\t\t\telse:\n\t\t\t\tfor key in dict_seq.keys():\n\t\t\t\t\tdict_seq[key]+=missing_char\n\t#Cheange the key to fasta header\n\tfinal_dict={}\n\tfor key in dict_seq.keys():\n\t\tnewkey=\">\"+chrom+\"_\"+str(start)+\"_\"+str(end)+\"_\"+key\n\t\tif variants==\"DIP\":\n\t\t\tnewkey1=\">\"+chrom+\"_\"+str(start)+\"_\"+str(end)+\"_\"+key+\"_allele1\"\n\t\t\tfinal_dict[newkey1]=dict_seq[key][0]\n\t\t\tnewkey2=\">\"+chrom+\"_\"+str(start)+\"_\"+str(end)+\"_\"+key+\"_allele2\"\n\t\t\tfinal_dict[newkey2]=dict_seq[key][1]\n\t\telse:\n\t\t\tnewkey=\">\"+chrom+\"_\"+str(start)+\"_\"+str(end)+\"_\"+key\n\t\t\tfinal_dict[newkey]=dict_seq[key]\n\treturn final_dict", "def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])", "def compatibility_g_a(gen, anot):\n print(\"Checking compatibility of genome with annotation file\")\n r_code = 0\n for seq in gen:\n if seq not in anot:\n print(\"WARN\\t{} sequence not found in annotaion file\".format(seq))\n r_code = 1\n for seq in anot:\n if seq not in gen:\n print(\"FAIL\\t{} sequence in annotation \"\n \"but not in genome.\".format(seq))\n r_code = 2\n elif anot[seq] > gen[seq]:\n print(\"FAIL\\tannotation interval on {} sequence is out of \"\n \"reference range.\".format(seq))\n r_code = 2\n print()\n return r_code", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def segment_outer_range(segment_lengths, out_idx=tf.int32):\n max_length = tf.reduce_max(segment_lengths)\n tiled_range = tf.tile(tf.expand_dims(tf.range(tf.size(segment_lengths, out_type=out_idx)), 1), [1, max_length])\n return tf.boolean_mask(\n tiled_range, tf.sequence_mask(segment_lengths, max_length))", "def _find_start_or_end_non_code(\n cls, segments: Sequence[BaseSegment]\n ) -> Optional[int]:\n if segments:\n for idx in [0, -1]:\n if not cls._is_code_or_meta(segments[idx]):\n return idx\n return None", "def flag_sgrnas(sgrna_df, flag_seqs, flag_seqs_start, flag_seqs_end):\n flagged_df = sgrna_df.copy()\n flag_cols = []\n for seq in flag_seqs:\n flagged_df[seq] = [seq if x else None for x in 
flagged_df['sgrna_sequence'].str.contains(seq)]\n flag_cols.append(seq)\n for seq in flag_seqs_start:\n seq_str = '(start)' + seq\n flagged_df[seq_str] = [seq_str if x else None for x in flagged_df['sgrna_sequence'].str.startswith(seq)]\n flag_cols.append(seq_str)\n for seq in flag_seqs_end:\n seq_str = seq + '(end)'\n flagged_df[seq_str] = [seq_str if x else None for x in flagged_df['sgrna_sequence'].str.endswith(seq)]\n flag_cols.append(seq_str)\n flagged_df['flag'] = flagged_df[flag_cols].apply(\n lambda row: ', '.join([x for x in row if not pd.isna(x)]), axis=1)\n flagged_df = flagged_df.drop(flag_cols, axis=1)\n return flagged_df", "def recheckPosition(self):\n self.start = self.bounds[0].pos\n self.end = self.bounds[1].pos", "def which_region(self, g):\n return NotImplementedError", "def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n = 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.", "def masktoregions(in_mask):\n regions = []\n for i in [0,1]: # do the thing for the first and second strands\n current_strand = in_mask[i].copy().astype(float)\n current_strand[-1] = np.nan # set final position to np.nan to avoid overlap issues\n transitions = current_strand - np.roll(current_strand,1)\n true_start = np.where(transitions == 1)[0]\n true_end = np.where(transitions == -1)[0] - 1\n if current_strand[0] == 1: # if starts on True, add True start to front end\n true_start = np.r_[0,true_start]\n if in_mask[i][-1] == True: # if ends on True, add True end to back end\n true_end = np.r_[true_end, len(current_strand)-1]\n if in_mask[i][-2] == False: # if the one before is False, it's a single point True\n true_start = np.r_[true_start,len(current_strand)-1]\n if np.all(in_mask[i][-2:] == [True, False]):\n true_end = np.r_[true_end, len(current_strand)-2]\n regions.append(np.asarray([np.zeros(len(true_start))+i,true_start,true_end]).T)\n out_regions = np.concatenate(regions,axis=0).astype(int)\n return out_regions", "def isUndefinedRange(program: ghidra.program.model.listing.Program, startAddress: ghidra.program.model.address.Address, endAddress: ghidra.program.model.address.Address) -> bool:\n ..." ]
[ "0.5558654", "0.51969874", "0.5173438", "0.50165325", "0.4989496", "0.49694717", "0.4968496", "0.49294603", "0.49171147", "0.4901071", "0.4888251", "0.4886161", "0.48798653", "0.48647732", "0.4821777", "0.4810842", "0.47864586", "0.47824484", "0.4780181", "0.4776257", "0.47637388", "0.47513595", "0.47496626", "0.4739018", "0.4736262", "0.473185", "0.47293788", "0.47277775", "0.47027868", "0.4698535" ]
0.555699
1
For the selected reports (training or testing) in the database, process each report with peFinder
def processReports(self):
    count = 0
    for r in self.reports:
        #need to change the next two lines so that the fields are not hard-coded
        self.currentCase = r.id
        self.currentText = r.impression.lower()
        self.analyzeReport(self.currentText, "disease",
                           modFilters=['indication','probable_existence',
                                       'definite_existence', 'historical','future','pseudoneg',
                                       'definite_negated_existence', 'probable_negated_existence'])
        self.recordResults()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def _go_through_summary_reports(self):\n\n for result_file in self.result_files:\n self.cur_8digit_dir = os.path.split(result_file)[0]\n try:\n with open(result_file) as f_in:\n sum_rep = json.load(f_in)\n if sum_rep.has_key('Artifacts'):\n for linked_artifact in sum_rep['Artifacts']:\n artifact_path = linked_artifact['Path']\n # For now assume only files are linked (no folders)\n rel_path_from_results = os.path.join(self.cur_8digit_dir, artifact_path)\n if os.path.exists(rel_path_from_results):\n self.files_for_export.append(os.path.join('results',\n rel_path_from_results))\n if artifact_path.endswith('.json'):\n function_tag = artifact_path.replace('.','_').replace('/','_')\n\n if hasattr(self, function_tag):\n getattr(self, function_tag)()\n except IOError:\n print '{0} does not exist on this filesystem. I cannot be check for references '\\\n 'to other files.'.format(result_file)", "def _process_result_detailed(\n self, test, dupes, findingdetail, query, result, find_date\n ):\n name, cwe, categories, queryId = self.getQueryElements(query)\n sev = result.get(\"Severity\")\n title = query.get(\"name\").replace(\"_\", \" \")\n state = result.get(\"state\")\n # Loop over <Path> (there should be only one)\n paths = result.findall(\"Path\")\n if (len(paths)) > 1:\n logger.warning(\n \"Checkmarx scan: more than one path found: \"\n + str(len(paths))\n + \". 
Only the last one will be used\"\n )\n\n for path in paths:\n sourceFilename = \"\"\n sinkFilename = \"\"\n sourceLineNumber = None\n sinkLineNumber = None\n sourceObject = \"\"\n sinkObject = \"\"\n similarityId = str(path.get(\"SimilarityId\"))\n path_id = str(path.get(\"PathId\"))\n pathId = similarityId + path_id\n findingdetail = \"{}-----\\n\".format(findingdetail)\n # Loop over function calls / assignments in the data flow graph\n for pathnode in path.findall(\"PathNode\"):\n findingdetail = self.get_description_detailed(\n pathnode, findingdetail\n )\n nodeId = pathnode.find(\"NodeId\").text\n if nodeId == \"1\":\n (\n sourceFilename,\n sourceLineNumber,\n sourceObject,\n ) = self.get_pathnode_elements(pathnode)\n # the last pathnode is the sink\n (\n sinkFilename,\n sinkLineNumber,\n sinkObject,\n ) = self.get_pathnode_elements(pathnode)\n # pathId is the unique id from tool which means that there is\n # basically no aggregation except real duplicates\n aggregateKeys = \"{}{}{}{}{}\".format(\n categories, cwe, name, sinkFilename, pathId\n )\n if title and sinkFilename:\n title = \"{} ({})\".format(title, sinkFilename.split(\"/\")[-1])\n\n find = Finding(\n title=title,\n cwe=int(cwe),\n test=test,\n active=self.isActive(state),\n verified=self.isVerified(state),\n false_p=result.get(\"FalsePositive\") == \"True\",\n description=findingdetail,\n severity=sev,\n file_path=sinkFilename,\n line=sinkLineNumber,\n date=find_date,\n static_finding=True,\n unique_id_from_tool=pathId,\n sast_source_object=sourceObject,\n sast_sink_object=sinkObject,\n sast_source_line=sourceLineNumber,\n sast_source_file_path=sourceFilename,\n vuln_id_from_tool=queryId,\n )\n dupes[aggregateKeys] = find", "def do(self):\r\n self.dlCsvReport()\r\n self.dlXlsReport()", "async def process_reports(self):\n features = [features for (__, features) in self.updates]\n\n # Faster way to deep flatten a list of lists compared to list comprehension\n feature_dataset = list(chain.from_iterable(features))\n\n # Training the model using all the features received from the client\n sampler = all_inclusive.Sampler(feature_dataset)\n self.algorithm.train(feature_dataset, sampler,\n Config().algorithm.cut_layer)\n\n # Test the updated model\n self.accuracy = self.trainer.test(self.testset)\n logging.info('[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n\n await self.wrap_up_processing_reports()", "def process_output_reports(results, analysis, date_now):\n #PLUG_INS[analysis.plug_in].set_data(analysis.title, file_path, results)\n output = PLUG_INS[analysis.plug_in]()\n file_path = settings.REPORT_PATH+\"/analysis%s_%s_%s_%s_%s_%s_%s\" % (analysis.id, date_now.year, date_now.month, date_now.day, date_now.hour, date_now.minute, date_now.second)\n output.set_data(analysis.title, file_path, results)\n\n result = AnalysisResult(analysis=analysis, output=string.split(output.get_output_file(), \"/\")[-1], run_date=date_now)\n result.save() \n analysis.last_report = date_now\n analysis.save()\n return True", "def prepare(self):\n for scenario_result, scenario_pass, case_pass in self.iterate():\n for step_result in scenario_result.step_results:\n step_pass = step_result.success\n url, method = step_result.fetch.url, step_result.fetch.method\n params = step_result.fetch.kwargs.get(\"params\")\n method_report = self.get_method_report(url, method)\n if method_report:\n method_report.add(\n case_pass, scenario_pass, step_pass, params\n )", "def buildReports(self):\n pass", "def 
test_report_definition(self):\n self.model = self.scan.model\n self.model.save()\n new_model = pycotools3.tasks.CopasiMLParser(self.copasi_file).xml\n reports = new_model.find('{http://www.copasi.org/static/schema}ListOfReports')\n check = False\n for report in reports:\n if report.attrib['name'] == 'parameter_estimation':\n check = True\n self.assertTrue(check)", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def __generate_reports__(self,configs,mockdb):\n sample_keys = self.__completed_samples_list__(mockdb)\n n = len(sample_keys)\n numbers = configs['pipeline'].get('Flowcell_reports','numbers').split(',')\n numbers.sort(key=int,reverse=True)\n flowcell = mockdb['Flowcell'].__get__(configs['system'],key=self.flowcell_key)\n for number in numbers:\n if n >= int(number):\n if getattr(self,'flowcell_report_' + str(number) + '_key') is None:\n report = mockdb['FlowcellStatisticReport'].__new__(configs['system'],sample_keys=sample_keys,flowcell=flowcell,number=number,base_output_dir=self.base_output_dir)\n report.__fill_qsub_file__(configs)\n report.__launch__(configs['system'])\n setattr(self,'flowcell_report_' + str(number) + '_key',report.key)\n return True\n return False\n return False", "def create_reports():\n \n date_now = datetime.now()\n for report in Report.objects.filter(activated=True):\n\t\n\tif report.last_report == None or report.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[report.interval]):\n\t #if report is now so do not execute it times \n\t if report.last_report != None and report.interval == 'n':\n\t\tcontinue\n\t if report.date_to != None and report.date_to < date_now:\n\t\tcontinue\n\t \n\t # check if query is good\n\t check_ok, db_query = check_query(report)\n\t if not check_ok:\n\t\tcontinue\n\t \n\t # check if date patterns are in query\n\t date_pattern_from = string.find(db_query, \"${{d1}}\")\n\t date_pattern_to = string.find(db_query, \"${{d2}}\")\n\t if date_pattern_from != -1:\n\t\tdate_from = date_now - timedelta( seconds=PERIOD_CHOICES[report.interval])\n\t else:\n\t\tdate_from = None\n\t if date_pattern_to != -1:\n\t\tdate_to = date_now\n\t else:\n\t\tdate_to = None\n\n\t # excute reports for past periods\n\t if not execute_past_reports(report, db_query, date_from, date_to, date_now):\n\t\tcontinue\n\n\t # execute query for this time\n\t if date_from != None:\n\t\tdb_query = string.replace(db_query, \"${{d1}}\", \"new Date(%s,%s,%s)\" % (date_from.year, date_from.month - 1, date_from.day))\n\t if date_to != None:\n\t\tdb_query = string.replace(db_query, \"${{d2}}\", \"new Date(%s,%s,%s)\" % (date_to.year, date_to.month - 1, date_to.day))\n\n\t if not execute_query(db_query, report, date_now):\n\t\tprint \"error - unsupported query: report title: %s, id: \" % (report.title, report.id)\n\t\tcontinue\n\n return True", "def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)", "def 
run(self):\n if not self._instance:\n self._instance = \\\n self.appresponse.reports.create_instance(self._data_defs)\n\n results = self._instance.get_data()['data_defs']\n\n for i, res in enumerate(results):\n source_name = self._data_defs[i].source.name\n self._data_defs[i].columns = res['columns']\n if 'data' in res:\n self._data_defs[i].data = self._cast_number(res,\n source_name)\n else:\n self._data_defs[i].data = []\n logger.debug(\"Obtained {} records for the {}th data request.\"\n .format(len(self._data_defs[i].data), i))", "def perform_parse(self):\n # get folder of pdf files\n folder = QFileDialog.getExistingDirectory(\n parent=self.parent(),\n caption='Get folder with PDF documents to parse'\n )\n if folder:\n # get list of fields and patterns\n field_list = self._get_fields()\n # performing parse\n results = make_parse(folder, field_list)\n self.open_result(results)", "def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != 
None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def find_records():\r\n\r\n print(\"begin find records\")\r\n\r\n study_list = retrieve_ref('study_list')\r\n sensor_list = retrieve_ref('sensor_list')\r\n # sensor_unit_list = retrieve_ref('sensor_unit_list')\r\n\r\n for study in study_list:\r\n # print('study = ' + str(study))\r\n source_path = os.path.join(study, 'source')\r\n # print('source_path = ' + str(source_path))\r\n\r\n source_folders = os.listdir(source_path)\r\n # print(str(study) + ' source_folders = ')\r\n # print(source_folders)\r\n\r\n df_meta = pd.DataFrame()\r\n df_meta['source_path'] = source_folders\r\n save_meta(study, df_meta)\r\n record_to_summary(study, 'Records found', str(len(source_folders)))\r\n\r\n print(\"completed find records\")", "def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def __init__(self, options):#dbname, outfile, save_dir, table, idcolumn, txtcolumn, doGraphs):\n\n t = time.localtime()\n\n self.doGraphs = options.doGraphs\n self.allow_uncertainty = options.allow_uncertainty\n self.proc_category = options.category\n\n self.reports = Report.objects.filter(dataset=options.dataset)[:options.number]\n\n #print \"number of reports to process\",len(self.reports)\n #raw_input('continue')\n\n # create context objects for each of the questions we want to be answering\n self.context = {\"disease\":pyConText.pyConText()}\n\n rsltsDB = options.odbname\n\n alerts=Alert.objects.all()\n alerts.delete()\n rslts=Result.objects.all()\n rslts.delete()\n\n # Create the itemData object to store the modifiers for the analysis\n # starts with definitions defined in pyConText and then adds\n # definitions specific for peFinder\n\n #label specifies whether the user wants a domain or linguistic set.\n\n #items returns an array of contextItems (e.g. 
getCategory(), getLiteral() )\n items_modifiers = itemData.instantiateFromSQLite(\"../pyConTextWeb.db\",options.label_modifiers,\"pyConTextKit_lexical\")\n items_targets = itemData.instantiateFromSQLite(\"../pyConTextWeb.db\",options.label_targets,\"pyConTextKit_lexical\")\n\t\t#itemData = itemData.itemData(items)\n \"\"\"\n probableNegations = itemData('PROBABLE_NEGATED_EXISTENCE')\n definiteNegations = itemData('DEFINITE_NEGATED_EXISTENCE')\n pseudoNegations = itemData('PSEUDONEG')\n indications = itemData('INDICATION')\n historicals = itemData('HISTORICAL')\n conjugates = itemData('CONJ')\n probables = itemData('PROBABLE_EXISTENCE')\n definites = itemData('DEFINITE_EXISTENCE')\n future = itemData('FUTURE')\n critItems = itemData('CRIT_ITEMS')\n\n self.modifiers = {\"disease\":itemData('')}\n self.modifiers[\"disease\"].prepend(pseudoNegations)\n self.modifiers[\"disease\"].prepend(definiteNegations)\n self.modifiers[\"disease\"].prepend(probableNegations)\n self.modifiers[\"disease\"].prepend(probables)\n self.modifiers[\"disease\"].prepend(definites)\n self.modifiers[\"disease\"].prepend(indications)\n self.modifiers[\"disease\"].prepend(conjugates)\n self.modifiers[\"disease\"].prepend(future)\n self.modifiers[\"disease\"].prepend(historicals)\n \t\"\"\"\n\n # Quality targets (generated from category parameter set by parser)\n if( options.category.lower() == 'all'):\n targetItems = critItems\n else:\n targetItems = itemData(options.category)\n self.targets = {\"disease\":targetItems}\n self.models = {}", "def query(self):\r\n self.reportDrivers()", "def do_analysis(ckpt, queries_type, entities_type, request):\n global currently_analyzing, results, d, analysis_user\n try:\n print(\"starting analysis!\")\n if entities_type == \"all\":\n print(\"using all entities detected!\")\n elif entities_type == \"uploaded\":\n print(\"using only entities specified in csv file!\")\n \n currently_analyzing = True\n analysis_user = request.user.username\n results = []\n proj_path = os.path.abspath(os.path.dirname(__file__)).split(\"FYP_Web_App\")[0]\n ckpt = proj_path + \"FewRel/checkpoint/\" + ckpt\n if d is None or d.ckpt_path != ckpt:\n d = DetectionFramework(ckpt_path=ckpt)\n if cancel_flag[0]:\n return\n d.clear_support_queries()\n if len([i for i in os.listdir(\"temp/relation_support_datasets\") if 'csv' in i and request.user.username in i]) == 0:\n raise ValueError(\"Please upload relation support dataset!\")\n \n d.load_support_files(\"temp/relation_support_datasets\", request.user.username)\n if queries_type == \"csv_option\":\n if not os.path.exists(\"temp/queries.csv\"):\n raise ValueError(\"Please upload query CSV dataset!\")\n d.load_queries_csv(\"temp/queries.csv\")\n \n elif queries_type == \"url_option\":\n if not os.path.exists(\"temp/url.txt\"):\n raise ValueError(\"Please specify news article url!\")\n with open(\"temp/url.txt\") as f:\n url = f.read()\n d.load_url(url)\n \n elif queries_type == \"txt_option\":\n d.load_text_files(os.path.abspath(\"temp/text_files\"))\n \n elif queries_type == \"ind_sentence_option\":\n ind_sentence = request.POST.get('ind_sent')\n d.load_ind_sentence(ind_sentence)\n \n elif queries_type == \"html_option\":\n d.load_html_file_queries(os.path.abspath(\"temp/html_files\"))\n \n if entities_type == \"uploaded\":\n d.trim_queries_based_on_entities_file(os.path.abspath(\"temp/entities_csv_file.csv\"))\n\n if cancel_flag[0]:\n return\n d.detect(rt_results=results, cancel_flag=cancel_flag)\n if cancel_flag[0]:\n return\n src=None\n if queries_type == 
\"csv_option\":\n src = \"queries_csv\"\n elif queries_type == \"txt_option\":\n src = \"queries_text_file\"\n elif queries_type == \"ind_sentence_option\":\n src = \"ind_sentence\"\n elif queries_type == \"url_option\":\n with open(\"temp/url.txt\") as f:\n src = f.read()\n elif queries_type == \"html_option\":\n src = \"html_files\"\n \n s = Source(source=src, user=request.user)\n s.save()\n for r in results:\n er = ExtractedRelation(sentence=r['sentence'],head=r['head'],tail=r['tail'],pred_relation=r['pred_relation'],sentiment=r['sent'],conf=r['conf'],ckpt=ckpt, source=s)\n er.save()\n except Exception as e:\n print(len(str(e)))\n print(str(e))\n errors.append(str(e))\n tb = traceback.format_exc()\n print(tb)\n finally:\n currently_analyzing = False\n analysis_user = None", "def runtime_analysis(config, overall_report):\n test_case_report_list = []\n \n for test_suite in config.get_test_suite():\n report = dict()\n report['stdout_stream'] = ''\n report['stderr_stream'] = ''\n report['outfile'] = ''\n\n input_for_stdin = config.get_test_suite_input_for_stdin(test_suite)\n # using Popen instead of run because I need access to the pid\n # See comment under \"except subprocess.TimeoutExpired:\"\n infile = \"xinfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n outfile = \"xoutfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n p = subprocess.Popen(['./run_jail.sh',\n config.output_filename,\n str(len(test_suite)), infile, outfile], # command\n stdout=subprocess.PIPE, # capture stdout\n stderr=subprocess.PIPE, # capture stderr\n stdin=subprocess.PIPE, # capture stdin\n universal_newlines=True, # use text mode for std* file objects\n start_new_session=True, # otherwise killing the process group will also kill the Python interpreter\n )\n\n try:\n # send test suite input\n with open(infile, \"w\") as f:\n f.write(input_for_stdin)\n (stdout_stream, stderr_stream) = p.communicate(timeout=config.timeout)\n \n report['return_code'] = p.returncode\n report['stderr_stream'] += stderr_stream\n report['stdout_stream'] += stdout_stream\n with open(outfile, \"r\") as f:\n current_outfile = f.read()\n report['outfile'] += current_outfile\n \n # check if test cases passed\n ret_output_match = config.check_for_output_match(current_outfile, test_suite)\n report['test_suite'] = test_suite\n report['output_match'] = ret_output_match\n \n except subprocess.TimeoutExpired:\n # kill the process group so that all child processes spawned by the process are also killed\n # The child need to be killed because, in addition to wasting CPU cycles,\n # it can hold stdout and then Python will wait indefinitely even if the timeout is expired\n os.killpg(os.getpgid(p.pid), signal.SIGKILL) \n report['timeout'] = True\n finally:\n test_case_report_list.append(report)\n \n overall_report['runtime_analysis_done'] = True\n\n return overall_report, test_case_report_list", "def run_processes(path_to_tests_file, path_to_labs_file):\r\n tests_dataframe = create_dataframe_from_csv(path_to_tests_file)\r\n labs_dataframe = create_dataframe_from_csv(path_to_labs_file)\r\n tests_dataframe = drop_missing_values_in_dataframe(tests_dataframe)\r\n labs_dataframe = drop_missing_values_in_dataframe(labs_dataframe)\r\n tests_dataframe = add_new_column(tests_dataframe, \"lab_name\")\r\n tests_dataframe = add_new_column(tests_dataframe, \"distance_from_lab\")\r\n tests_dataframe = add_new_column(tests_dataframe, \"time_test_arrives_lab\")\r\n tests_dataframe = update_lab_name_with_closest_lab(tests_dataframe, labs_dataframe)\r\n tests_dataframe = 
update_distance_from_closest_lab(tests_dataframe, labs_dataframe)\r\n tests_dataframe = update_time_test_arrives_lab(tests_dataframe, 60)\r\n tests_dataframe = update_completion_time(tests_dataframe)\r\n tests_dataframe = update_server_size(tests_dataframe)\r\n print(tests_dataframe)\r\n visualise_hourly_arrivals_at_each_lab(tests_dataframe)\r\n visualise_number_of_tests_simultaneously_processed_at_each_lab(tests_dataframe)", "def run_tests():\n source1 = TextModel('hilary_speaches')\n source1.add_file('hilary_source_text.txt')\n\n source2 = TextModel('bernie_speaches')\n source2.add_file('bernie_source_text.txt')\n\n new1 = TextModel('trump_speach')\n new1.add_file('trump_text.txt')\n new1.classify(source1, source2)\n\n new2 = TextModel('hilary_test')\n new2.add_file('hilary_test.txt')\n new2.classify(source1, source2)\n\n new3 = TextModel('bernie_test')\n new3.add_file('bernie_test.txt')\n new3.classify(source1, source2)\n\n new4 = TextModel('bill_clinton_test')\n new4.add_file('bill_clinton_source.txt')\n new4.classify(source1, source2)", "def pe_analysis():\r\n\r\n global params\r\n n = params['n']\r\n level = params['level']\r\n pemin = params['pemin']\r\n pemax = params['pemax']\r\n pestep = params['pestep']\r\n ns = params['ns']\r\n nt = params['nt']\r\n po = params['po']\r\n pf = params['pf']\r\n pe_dir = create_dir(TIMES_PATH + 'pe')\r\n filename = datetime.datetime.now().strftime(DATE_FORMAT) + '.csv'\r\n with open(pe_dir + filename, 'wb') as csvfile:\r\n fw = csv.writer(csvfile)\r\n fw.writerow(('lv', 'ns', 'nt', 'no', 'pe', 'ne', 'nf',\r\n 'method1', 'method2', 'method3_1', 'method3_2'))\r\n pe = pemin\r\n while pe <= pemax:\r\n no = max(int(round(po * nt)), 1)\r\n ne = max(int(round(pe * no)), 1)\r\n nf = max(int(round(pf * nt)), 1)\r\n times = [0] * 4\r\n for i in range(n):\r\n times = map(sum, zip(times, get_times(level, ns, nt, no, ne, nf)))\r\n show_progress('pe', pe, pemin, pemax, pestep, i, n)\r\n times = map(lambda x: float(x) / n, times)\r\n fw.writerow((level, ns, nt, no, pe, ne, nf) + tuple(times))\r\n pe += pestep\r\n csvfile.close()", "def report(self, *reporters):\n if len(reporters) == 0:\n reporters = [c() for c in dexy.reporter.Reporter.plugins if c.ALLREPORTS]\n\n for reporter in reporters:\n self.log.debug(\"Running reporter %s\" % reporter.ALIASES[0])\n reporter.run(self)", "def run(self, output):\n self.output = output\n if self.keyword:\n self.keywordReport()\n elif self.geography:\n self.geographyReport()\n else:\n self.customerReport()", "def query(self):\r\n reports = self.get_relevant_reports()\r\n new_files = self.construct_report_dict(reports)\r\n updated, new_reports = self.is_updated(new_files, self.old_files)\r\n if len(self.old_files) != 0 and updated:\r\n self.process_changes(new_reports)\r\n self.old_files = new_files" ]
[ "0.5900469", "0.58614814", "0.58482856", "0.5812443", "0.5654445", "0.5645506", "0.56158066", "0.55587703", "0.55587286", "0.55434436", "0.5528884", "0.5463674", "0.5459964", "0.54589087", "0.5455149", "0.5428829", "0.5415472", "0.5411975", "0.54082245", "0.540817", "0.5405722", "0.54008585", "0.539859", "0.538893", "0.5371671", "0.53674096", "0.5329541", "0.532428", "0.531649", "0.5314272" ]
0.65057874
0
Compute the area of a geospatial value. Returns FloatingValue The area of `self`
def area(self) -> ir.FloatingValue: return ops.GeoArea(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(self) -> float:\n raise NotImplementedError", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def area(self):\n if len(self.exterior) < 3:\n raise Exception(\"Cannot compute the polygon's area because it contains less than three points.\")\n poly = self.to_shapely_polygon()\n return poly.area", "def area(self):\n return _property_op(arctern.ST_Area, self)", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area", "def area(self) -> npt.NDArray[np.float_]:\n return np.sum(self.faces.area)", "def area(self):\n return self._ned_shape.area", "def area(self):\n\n bbox = self.bbox\n area = Box.calculate_bbox_area(bbox, bbox_type=self.bbox_type)\n\n return area", "def area(self):\n area = self._lengths[0] * self._lengths[1] * math.sin(math.radians(self._angles[0]))\n area += self._lengths[2] * self._lengths[3] * math.sin(math.radians(self._angles[0]))\n return float('{:.2f}'.format(area * 0.5))", "def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)", "def area(self):\n return self.radius*self.radius*math.pi", "def area(self):\n\n return (self.x1 - self.x0) * (self.y1 - self.y0)", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def area(self) -> torch.Tensor:\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area", "def area(self):\n semi_perimeter = self.perimeter() / 2\n area = semi_perimeter\n for l in self._lengths:\n area *= (semi_perimeter - l)\n return float('{:.2f}'.format(area**0.5))", "def area(self):\n return math.pi*self._radius*self._radius", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def get_area(geo_data):\n return geo_data[\"geometry\"].area", "def area(self):\n area = 0.25*self._sides*self._length**2 / math.tan(math.radians(180/self._sides))\n return float('{:.2f}'.format(area))", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def get_area(self):\n ### Original\n from pyresample.spherical_geometry import get_polygon_area\n\n return get_polygon_area(self.corners)\n ### End Original\n #from .spherical import SphPolygon\n #shell()\n #log.info('RUNNING SPHERICAL in get_area')\n\n #return SphPolygon(self.corners).area", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def 
area(self):\n num_rows = self.row_end - self.row_start\n num_cols = self.col_end - self.col_start\n area = num_rows*num_cols\n return area", "def area(self):\n return np.array([f.area() for f in self])", "def area(self):\n return math.pi * self._r ** 2", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area(self):\n area = self.__width * self.__height\n return area" ]
[ "0.78390014", "0.7787656", "0.7762487", "0.771008", "0.7676024", "0.7494311", "0.73737156", "0.73681015", "0.7305198", "0.7286801", "0.72227156", "0.71913487", "0.7185521", "0.71353227", "0.71084666", "0.70956665", "0.7095528", "0.7091121", "0.7076087", "0.70625275", "0.70147765", "0.7004652", "0.6997977", "0.6994062", "0.6994062", "0.69868153", "0.69811183", "0.69329274", "0.6907783", "0.686524" ]
0.89519435
0
Get the geometry as well-known text (WKT) without the SRID data. Returns StringValue String value
def as_text(self) -> ir.StringValue: return ops.GeoAsText(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWKT(self):\n logger.debug(\"Entering in ocentricWKT.getWkt\")\n\n # building WKT string\n wkt = OcentricWKT.GEODCRS % (\n self.getGeoGcsName(), self.getDatumName(), self.getSpheroidName(), self.getRadius(), self.getInverseFlattening(),\n self.getRadius(), self.getAuthorityName(), self.getAuthorityCode()\n )\n\n logger.debug(\"Exiting from ocentricWKT.getWkt\")\n return wkt", "def as_ewkt(self) -> ir.StringValue:\n return ops.GeoAsEWKT(self).to_expr()", "def to_wkt(self):\n return _property_op(arctern.ST_AsText, self)", "def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)", "def geometry_type(self) -> ir.StringValue:\n return ops.GeoGeometryType(self).to_expr()", "def __coordinate_system_to_str__(value_int):\n if value_int == GeometryTopologyData.IJK: return \"IJK\"\n elif value_int == GeometryTopologyData.RAS: return \"RAS\"\n elif value_int == GeometryTopologyData.LPS: return \"LPS\"\n return \"UNKNOWN\"", "def getquoted(self):\n if self.is_geometry:\n # Psycopg will figure out whether to use E'\\\\000' or '\\000'.\n return b\"%s(%s)\" % (\n b\"ST_GeogFromWKB\" if self.geography else b\"ST_GeomFromEWKB\",\n sql.quote(self.ewkb).encode(),\n )\n else:\n # For rasters, add explicit type cast to WKB string.\n return b\"'%s'::raster\" % self.ewkb.hex().encode()", "def wgs84_wkt():\n return WGS84.to_wkt()", "def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict", "def getRawText(self):\n return self.graph.get(\"__rawTxt\", '')", "def wkt(self): # -> str:\n ...", "def get_spheroid(cls, wkt, string=True):\n if HAS_GDAL:\n srs = SpatialReference(wkt)\n sphere_params = srs.ellipsoid\n sphere_name = srs['spheroid']\n else:\n m = cls.spheroid_regex.match(wkt)\n if m: \n sphere_params = (float(m.group('major')), float(m.group('flattening')))\n sphere_name = m.group('name')\n else: \n return None\n \n if not string: \n return sphere_name, sphere_params\n else:\n # `string` parameter used to place in format acceptable by PostGIS\n if len(sphere_params) == 3:\n radius, flattening = sphere_params[0], sphere_params[2]\n else:\n radius, flattening = sphere_params\n return 'SPHEROID[\"%s\",%s,%s]' % (sphere_name, radius, flattening)", "def get_text(cls, quad):\n\t\ttext = ast.literal_eval(str(cls.get_address_value(quad.result)))\n\t\tx = cls.get_address_value(quad.left_operand)\n\t\ty = cls.get_address_value(quad.right_operand)\n\t\treturn [x, y, text]", "def get_geometry(id):\n geom = read_kml()\n result = geom[\"geometry\"][id]\n # print(f\"get_geometry(id={id.__repr__()}) --> {result}\")\n # result.plot()\n return result", "def getText(self):\n return self.graph.get(\"__txt\", '')", "def _utm_description(self):\n # 'PROJCS' vs. 
'PROJCRS' in rsplit\n if int(gdal.VersionInfo()) >= 3000000:\n ifo = self._info['coordinateSystem']['wkt'].rsplit('PROJCRS[\"', 1)[-1].split('\"')[0]\n else:\n ifo = self._info['coordinateSystem']['wkt'].rsplit('PROJCS[\"', 1)[-1].split('\"')[0]\n return ifo", "def _getStringFeature(self):\n\n # create args\n bufferSize = 256\n valueToGet = create_string_buffer('\\000' * bufferSize)\n sizeFilled = c_uint32()\n\n errorCode = VimbaDLL.featureStringGet(self._handle,\n self._name,\n valueToGet,\n bufferSize,\n byref(sizeFilled))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return valueToGet.value", "def _parse_wkt(s):\n if s.startswith('SRID'):\n s = s[s.index(';') + 1:]\n return shapely.wkt.loads(s)", "def geometry():\n return Geometry()", "def getText(self):\n return _libsbml.TextGlyph_getText(self)", "def sa_wkb_to_wkt(sa_wkb):\n return gis_util.wkb_to_wkt(str(sa_wkb.geom_wkb))", "def geometry(self):\n return self[0].geometry", "def get_data_from_nonformat_text():\n pass", "def _get_shapes_text_values(fname, stream):\n\n # Maybe 2007+ file?\n r = _get_shapes_text_values_2007(fname)\n if (len(r) > 0):\n return r\n \n r = []\n try:\n # Read the WordDocument stream.\n ole = olefile.OleFileIO(fname, write_mode=False)\n if (not ole.exists(stream)):\n return []\n data = ole.openstream(stream).read()\n \n # It looks like maybe(?) the shapes text appears as ASCII blocks bounded by\n # 0x0D bytes. We will look for that.\n pat = r\"\\x0d[\\x20-\\x7e]{100,}\\x0d\"\n strs = re.findall(pat, data)\n #print \"STREAM: \" + str(stream)\n #print data\n #print \"^^^^^^^^^^^\"\n #print strs\n \n # Hope that the Shape() object indexing follows the same order as the strings\n # we found.\n pos = 1\n for shape_text in strs:\n\n # Access value with .TextFrame.TextRange.Text accessor.\n shape_text = shape_text[1:-1]\n var = \"Shapes('\" + str(pos) + \"').TextFrame.TextRange.Text\"\n r.append((var, shape_text))\n \n # Access value with .TextFrame.ContainingRange accessor.\n var = \"Shapes('\" + str(pos) + \"').TextFrame.ContainingRange\"\n r.append((var, shape_text))\n\n # Access value with .AlternativeText accessor.\n var = \"Shapes('\" + str(pos) + \"').AlternativeText\"\n r.append((var, shape_text))\n \n # Move to next shape.\n pos += 1\n\n # It looks like maybe(?) the shapes text appears as wide char blocks bounded by\n # 0x0D bytes. We will look for that.\n #pat = r\"\\x0d(?:\\x00[\\x20-\\x7e]){10,}\\x00?\\x0d\"\n pat = r\"(?:\\x00[\\x20-\\x7e]){100,}\"\n strs = re.findall(pat, data)\n \n # Hope that the Shape() object indexing follows the same order as the strings\n # we found.\n pos = 1\n for shape_text in strs:\n\n # Access value with .TextFrame.TextRange.Text accessor.\n shape_text = shape_text[1:-1].replace(\"\\x00\", \"\")\n var = \"Shapes('\" + str(pos) + \"').TextFrame.TextRange.Text\"\n r.append((var, shape_text))\n \n # Access value with .TextFrame.ContainingRange accessor.\n var = \"Shapes('\" + str(pos) + \"').TextFrame.ContainingRange\"\n r.append((var, shape_text))\n\n # Access value with .AlternativeText accessor.\n var = \"Shapes('\" + str(pos) + \"').AlternativeText\"\n r.append((var, shape_text))\n \n # Move to next shape.\n pos += 1\n \n except Exception as e:\n\n # Report the error.\n log.error(\"Cannot read associated Shapes text. 
\" + str(e))\n\n # See if we can read Shapes() info from an XML file.\n if (\"not an OLE2 structured storage file\" in str(e)):\n r = _get_shapes_text_values_xml(fname)\n\n return r", "def getGeometryType(restGeom):\n if \"Polygon\" in restGeom:\n return \"POLYGON\"\n elif \"Polyline\" in restGeom:\n return \"POLYLINE\"\n elif \"Point\" in restGeom:\n return \"POINT\"\n else:\n return \"Unknown\"", "def __unicode__(self):\n try:\n return unicode(self.srs)\n except:\n return unicode(self.wkt)", "def unit_of_measure(self):\n try:\n uom = self.metadata['geosoft']['dataset']['geo:unitofmeasurement']['#text']\n except KeyError:\n uom = ''\n return uom", "def _get_shapes_text_values_direct_2007(data):\n\n # TODO: This only handles a single Shapes object.\n \n # Get the name of the Shape element.\n pat1 = r'<v:shape\\s+id=\"(\\w+)\".+<w:txbxContent>'\n name = re.findall(pat1, data)\n if (len(name) == 0):\n return []\n name = name[0]\n\n # Get the text value(s) for the Shape.\n pat2 = r'<w:t[^<]*>([^<]+)</w:t[^<]*>'\n vals = re.findall(pat2, data)\n if (len(vals) == 0):\n return []\n\n # Reassemble the values.\n val = \"\"\n for v in vals:\n val += v\n val = val.replace(\"&amp\", \"&\")\n \n # Return the Shape name and text value.\n r = [(name, val)]\n return r", "def dftb_geom(name): \n dftb_geom = \"\"\"Geometry = GenFormat {\n <<< \"{{ title }}\"\n }\n \"\"\"\n return Environment().from_string(dftb_geom).render(title=name)", "def GetText(self):\r\n \r\n return self._text" ]
[ "0.65250087", "0.6227793", "0.6099172", "0.59958804", "0.59699154", "0.58982855", "0.5897963", "0.58915573", "0.5823331", "0.57201636", "0.5655366", "0.5626841", "0.5612404", "0.55262345", "0.5481621", "0.5471973", "0.5428957", "0.54264724", "0.5422329", "0.5410296", "0.53831935", "0.5360259", "0.5354276", "0.53458744", "0.5342485", "0.5325589", "0.53114516", "0.53039813", "0.5293654", "0.52668184" ]
0.6646094
0
Get the geometry as well-known bytes (WKB) with the SRID data. Returns BinaryValue WKB value
def as_ewkb(self) -> ir.BinaryValue: return ops.GeoAsEWKB(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_wkb(self):\n return _property_op(lambda x: x, self)", "def wkb(self): # -> bytes:\n ...", "def as_binary(self) -> ir.BinaryValue:\n return ops.GeoAsBinary(self).to_expr()", "def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)", "def wkb_hex(self): # -> str:\n ...", "def bytes_value(self) -> global___Expression.BytesValue:", "def get_bbox_data(self):\r\n with open(self.bboxes_local, 'r') as fbbox:\r\n data = fbbox.read()\r\n\r\n return data", "def get_binary(self):\n data = bytes()\n\n for tag in self._tags:\n value = 0\n if tag in self.fields.keys():\n value = self.fields[tag]\n try:\n data += struct.pack(\"<I\", value)\n except struct.error as e:\n raise TypeError(f\"expected integer value for {tag} but got {type(value)}: {value}\")\n\n return data", "def decodeBinary(self, wkb):\n value = binascii.a2b_hex(wkb)\n value = value[::-1]\n value = binascii.b2a_hex(value)\n return value", "def xst_bin(self):\n return self._xst_bin", "def wgs84_wkt():\n return WGS84.to_wkt()", "def api_bbox(bbox, srid=None, buffer=0.0):\n srid = srid or settings.SRID\n wkt_box = 'POLYGON(({0} {1}, {2} {1}, {2} {3}, {0} {3}, {0} {1}))'\n wkt = wkt_box.format(*bbox)\n native = wkt_to_geom(wkt, srid_from=srid)\n if srid != API_SRID:\n native.transform(API_SRID)\n if buffer > 0:\n extent = native.extent\n width = extent[2] - extent[0]\n native = native.buffer(width * buffer)\n return tuple(native.extent)", "def binary(self):\n return self.data.binary.values", "def sa_wkb_to_wkt(sa_wkb):\n return gis_util.wkb_to_wkt(str(sa_wkb.geom_wkb))", "def geometry(self, objectId):\n\n objectId = GeometryReference(objectId, self)\n req = urllib2.Request(self.baseUri + 'geometry/%d' % objectId.id)\n r = urllib2.urlopen(req)\n\n data = json.load(r)\n r.close()\n return data", "def _get_shape(geometry: WKBElement, crs) -> Optional[Geometry]:\n if geometry is None:\n return None\n\n shape = Geometry(to_shape(geometry), crs).to_crs(\"EPSG:4326\", wrapdateline=True)\n\n if not shape.is_valid:\n newshape = shape.buffer(0)\n assert math.isclose(\n shape.area, newshape.area, abs_tol=0.0001\n ), f\"{shape.area} != {newshape.area}\"\n shape = newshape\n return shape", "def geometry(self):\n return self[0].geometry", "def getBin(self):\n return self.fst_exe", "def getId(self):\n return _libsbml.BoundingBox_getId(self)", "def get_esys_blob(self, path: Union[bytes, str]) -> Tuple[bytes, Any]:\n path = _to_bytes_or_null(path)\n type_ = ffi.new(\"uint8_t *\")\n data = ffi.new(\"uint8_t **\")\n length = ffi.new(\"size_t *\")\n ret = lib.Fapi_GetEsysBlob(self._ctx, path, type_, data, length)\n _chkrc(ret)\n return bytes(ffi.unpack(_get_dptr(data, lib.Fapi_Free), length[0])), type_[0]", "def regional_data_boundary(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"regional_data_boundary\")", "def read_binary(self):\n length = self.read_uint32()\n bytes = self.data[:length]\n self.data = self.data[length:]\n return bytes", "def b64raster(self):\n r = self.craster()\n if r:\n if len(r) == 1:\n return r\n return b64encode(r)\n else:\n return None", "def getTypeCode(self):\n return _libsbml.BoundingBox_getTypeCode(self)", "def geometry(self):\n return self._geometry", "def geometry(self):\n return self._geometry", "def to_bytes(self, byteorder=\"little\"):\n return self._value.to_bytes(self.width(), byteorder=byteorder)", "def getBoundingBox(filepath):\n datasource = ogr.Open(filepath)\n geo_dict = {}\n\n for layer in datasource:\n layer_name = layer.GetDescription()\n ext = layer.GetExtent()\n 
bbox = [ext[0], ext[2], ext[1], ext[3]]\n\n try:\n spatial_ref = layer.GetSpatialRef()\n spatial_ref.AutoIdentifyEPSG()\n crs = spatial_ref.GetAuthorityCode(None)\n except Exception as e:\n logger.debug(\"Error extracting EPSG CODE from layer {}: \\n {}\".format(layer_name, e))\n crs = None\n\n # Patch GDAL > 3.2 for GML https://github.com/OSGeo/gdal/issues/2195\n if int(osgeo.__version__[0]) >= 3 and int(osgeo.__version__[2]) < 2 and datasource.GetDriver().GetName() ==\"GML\":\n bbox = [ext[2], ext[0], ext[3], ext[1]]\n\n geo_dict[layer_name] = {\"bbox\": bbox, \"crs\": crs}\n\n if bbox == null_island or crs is None:\n logger.debug(\"Layer {} does not have identifiable geographic extent. CRS may be missing.\".format(layer_name))\n del geo_dict[layer_name][\"crs\"]\n\n bbox_merge = hf.bbox_merge(geo_dict, filepath)\n\n spatial_extent = None\n\n if bbox_merge is not None:\n if len(bbox_merge) != 0:\n spatial_extent = bbox_merge\n\n return spatial_extent", "def getquoted(self):\n if self.is_geometry:\n # Psycopg will figure out whether to use E'\\\\000' or '\\000'.\n return b\"%s(%s)\" % (\n b\"ST_GeogFromWKB\" if self.geography else b\"ST_GeomFromEWKB\",\n sql.quote(self.ewkb).encode(),\n )\n else:\n # For rasters, add explicit type cast to WKB string.\n return b\"'%s'::raster\" % self.ewkb.hex().encode()", "def decode_geometry(geom: str) -> BasePolygon:\n return shape(geobuf.decode(bytes.fromhex(geom))).buffer(0)" ]
[ "0.586647", "0.5865369", "0.5783438", "0.5653033", "0.56270975", "0.5481678", "0.5458133", "0.5417851", "0.53809196", "0.5372961", "0.5362368", "0.53046584", "0.52930826", "0.52646327", "0.5259626", "0.5209731", "0.52090925", "0.5139013", "0.51351386", "0.51294726", "0.5106598", "0.5105523", "0.51050496", "0.5076978", "0.5040219", "0.5040219", "0.5031134", "0.5024018", "0.5017174", "0.5016612" ]
0.6025436
0
Check if `self` is entirely within `distance` from `right`.
def d_fully_within( self, right: GeoSpatialValue, distance: ir.FloatingValue, ) -> ir.BooleanValue: return ops.GeoDFullyWithin(self, right, distance).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def d_within(\n self,\n right: GeoSpatialValue,\n distance: ir.FloatingValue,\n ) -> ir.BooleanValue:\n return ops.GeoDWithin(self, right, distance).to_expr()", "def within_distance(self, point, distance):\n return all(distance >= seg.shortest_distance_to(point)\n for seg in self.segments)", "def around(self, point, distance):\n return self.distance(point) <= distance", "def collision(self, player, distance=None):\n if distance == None:\n distance = 2 * self.radius\n return self.pos.distance(player.pos) < distance", "def _has_right(self, index):\r\n return self._right(index) < len(self)", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def within(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoWithin(self, right).to_expr()", "def collides_with(self, other):\n\t\tdistance = self.position.distance_to(other.position) # Vector2.distance_to()\n\t\treturn distance < self.radius + other.radius", "def close(self, other):\n return (self.calculate_distance(other) < 100)", "def is_closeby(self, lat1, lon1, lat2, lon2, distance=0.5):\n\n distance = geopy.distance.distance((lat1, lon1), (lat2, lon2)).km < distance\t \n \n return(distance)", "def in_distance(a, b, d):\n return distance(a, b) <= d", "def covered_by(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCoveredBy(self, right).to_expr()", "def covers(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCovers(self, right).to_expr()", "def _has_right(self, j):\n return self._right(j) < len(self._data)", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def _has_right(self, j):\n return (2 * j + 2) < len(self)", "def distance(self, right: GeoSpatialValue) -> ir.FloatingValue:\n return ops.GeoDistance(self, right).to_expr()", "def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):\n target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n forward_vector = np.array(\n [math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle_th_low < d_angle < d_angle_th_up", "def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):\n target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n forward_vector = np.array(\n [math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle_th_low < d_angle < d_angle_th_up", "def collide_other_tower(self, other_tower):\n x2 = other_tower.x\n y2 = other_tower.y\n\n dis = math.sqrt((x2 - self.x) ** 2 + (y2 - self.y) ** 2)\n if dis >= 100:\n return False\n else:\n return 
True", "def is_visible(left, right):\n if not isinstance(left, Actor) or not isinstance(right, Actor):\n return False\n\n if left == right:\n return False\n\n is_alive = right.state != ActorState.DEATH\n radius = left.statistics.attack_range + left.radius + right.radius\n real_distance = (left.position - right.position).length()\n return is_alive and real_distance < radius", "def is_straight(distance_travel_x, distance_travel_y):\r\n if (distance_travel_x > 0 and distance_travel_y == 0) or (distance_travel_x == 0 and distance_travel_y > 0):\r\n return True\r\n else:\r\n return False", "def out_of_bounds(self):\n return self.rect.right <= 0", "def coordinate_positions_compare(self, other, r=10):\r\n # get max radius of forgiveness\r\n if isinstance(self[0], list): # [(x, y), r] case\r\n r = max(self[1], r)\r\n x1, y1 = self[0]\r\n else:\r\n x1, y1 = self\r\n\r\n if isinstance(other[0], list): # [(x, y), r] case\r\n r = max(other[1], r)\r\n x2, y2 = other[0]\r\n else:\r\n x2, y2 = other\r\n\r\n if (x2 - x1) ** 2 + (y2 - y1) ** 2 > r * r:\r\n return False\r\n\r\n return True", "def will_hit_edge(self, direction):\n return ((self.position <= 0 and direction.is_left()) or \n (self.position >= self.scene.step_count - 1 and \n direction.is_right()))", "def links_with(self, other, tollerance = 0.05):\n return (\n self.start.distance_to(other.start) < tollerance or\n self.start.distance_to(other.end) < tollerance or\n self.end.distance_to(other.end) < tollerance or\n self.end.distance_to(other.start) < tollerance\n )", "def check_point_right(nodeL, nodeR, city):\n A = get_city_points(city)\n B = get_node_points(nodeL)\n C = get_node_points(nodeR)\n slope = _slope(A, B)\n (F, G) = calibrator(A, B, slope)\n sign = math.copysign(1, ((G[0] - F[0]) * (C[1] - F[1]) - (G[1] - F[1]) * (C[0] - F[0])))\n\n if slope == \"horizontal\":\n if sign == 1:\n if A[0] > B[0]:\n return True\n else:\n return False\n else:\n if A[0] < B[0]:\n return True\n else:\n return False\n\n if slope == \"vertical\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"inclined\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"declined\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False", "def has_right(self):\n return self.right != None", "def tooTight(self, row, col, i, j):\n return self.distanceToGoal[row + i][col] == self.infinity or \\\n self.distanceToGoal[row][col + j] == self.infinity" ]
[ "0.7167458", "0.66897726", "0.65852267", "0.6398431", "0.631626", "0.6159795", "0.6069909", "0.60572755", "0.5953424", "0.59026295", "0.5893493", "0.5875279", "0.57910955", "0.5758687", "0.5736612", "0.57315516", "0.57160324", "0.5710056", "0.57049614", "0.57049614", "0.56849515", "0.5652183", "0.5628308", "0.562368", "0.55995303", "0.5593494", "0.5579947", "0.5567555", "0.55643094", "0.5560794" ]
0.7242819
0
Check if `self` is partially within `distance` from `right`.
def d_within( self, right: GeoSpatialValue, distance: ir.FloatingValue, ) -> ir.BooleanValue: return ops.GeoDWithin(self, right, distance).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def d_fully_within(\n self,\n right: GeoSpatialValue,\n distance: ir.FloatingValue,\n ) -> ir.BooleanValue:\n return ops.GeoDFullyWithin(self, right, distance).to_expr()", "def within_distance(self, point, distance):\n return all(distance >= seg.shortest_distance_to(point)\n for seg in self.segments)", "def around(self, point, distance):\n return self.distance(point) <= distance", "def collision(self, player, distance=None):\n if distance == None:\n distance = 2 * self.radius\n return self.pos.distance(player.pos) < distance", "def within(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoWithin(self, right).to_expr()", "def _has_right(self, index):\r\n return self._right(index) < len(self)", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def covered_by(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCoveredBy(self, right).to_expr()", "def covers(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCovers(self, right).to_expr()", "def is_closeby(self, lat1, lon1, lat2, lon2, distance=0.5):\n\n distance = geopy.distance.distance((lat1, lon1), (lat2, lon2)).km < distance\t \n \n return(distance)", "def collides_with(self, other):\n\t\tdistance = self.position.distance_to(other.position) # Vector2.distance_to()\n\t\treturn distance < self.radius + other.radius", "def in_distance(a, b, d):\n return distance(a, b) <= d", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def distance(self, right: GeoSpatialValue) -> ir.FloatingValue:\n return ops.GeoDistance(self, right).to_expr()", "def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):\n target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n forward_vector = np.array(\n [math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle_th_low < d_angle < d_angle_th_up", "def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):\n target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])\n norm_target = np.linalg.norm(target_vector)\n\n # If the vector is too short, we can simply stop here\n if norm_target < 0.001:\n return True\n\n if norm_target > max_distance:\n return False\n\n forward_vector = np.array(\n [math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])\n d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))\n\n return d_angle_th_low < d_angle < d_angle_th_up", "def contains_properly(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoContainsProperly(self, right).to_expr()", "def 
dwithin(a, b, distance, **kwargs):\n return lib.dwithin(a, b, distance, **kwargs)", "def close(self, other):\n return (self.calculate_distance(other) < 100)", "def is_distance_ahead(point_a, point_b, distance):\n global world\n\n _, route = interpolate_trajectory(world, [point_a.location, point_b.location])\n\n return distance < estimate_route_distance(route)", "def _has_right(self, j):\n return self._right(j) < len(self._data)", "def overlaps(self, other, particle_r):\n return self.distance_to(other) < (particle_r ** 2)", "def check_point_right(nodeL, nodeR, city):\n A = get_city_points(city)\n B = get_node_points(nodeL)\n C = get_node_points(nodeR)\n slope = _slope(A, B)\n (F, G) = calibrator(A, B, slope)\n sign = math.copysign(1, ((G[0] - F[0]) * (C[1] - F[1]) - (G[1] - F[1]) * (C[0] - F[0])))\n\n if slope == \"horizontal\":\n if sign == 1:\n if A[0] > B[0]:\n return True\n else:\n return False\n else:\n if A[0] < B[0]:\n return True\n else:\n return False\n\n if slope == \"vertical\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"inclined\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"declined\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False", "def _has_right(self, j):\n return (2 * j + 2) < len(self)", "def intersects(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoIntersects(self, right).to_expr()", "def collide_other_tower(self, other_tower):\n x2 = other_tower.x\n y2 = other_tower.y\n\n dis = math.sqrt((x2 - self.x) ** 2 + (y2 - self.y) ** 2)\n if dis >= 100:\n return False\n else:\n return True", "def links_with(self, other, tollerance = 0.05):\n return (\n self.start.distance_to(other.start) < tollerance or\n self.start.distance_to(other.end) < tollerance or\n self.end.distance_to(other.end) < tollerance or\n self.end.distance_to(other.start) < tollerance\n )", "def contains(self, possible_point):\n# if possible_point == self.endpoints[0] or possible_point == self.endpoints[1]:\n# return False\n distance = sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.0000001" ]
[ "0.7420986", "0.6674265", "0.6413584", "0.61454713", "0.6127788", "0.5950731", "0.59127736", "0.5828475", "0.5825035", "0.5794298", "0.57647914", "0.56845057", "0.5664932", "0.5646242", "0.56445026", "0.5642126", "0.5579724", "0.5579724", "0.5560193", "0.55255526", "0.5500712", "0.5440243", "0.5427561", "0.5427507", "0.54043794", "0.5403439", "0.5389688", "0.53834", "0.538146", "0.53803885" ]
0.7225795
1
Get the 1-based Nth geometry of a multi geometry.
def geometry_n(self, n: int | ir.IntegerValue) -> GeoSpatialValue: return ops.GeoGeometryN(self, n).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def geom_single(self, g_num):\n\n # Just return the appropriate geometry vector\n geom = self.geoms[g_num]\n return geom", "def geometry(self):\n return self[0].geometry", "def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry", "def get_geometry(self, selection_name):", "def get_geometry(self):\n rows, cols = self.get_gridspec().get_geometry()\n return rows, cols, self.num1, self.num2", "def geometry():\n return Geometry()", "def get_specific_tile(idx, tiles_gdf):\n tile_poly = tiles_gdf.iloc[idx]['geometry']\n # print(tile_poly.bounds)\n return tile_poly", "def get_multi_index(self):\n return self.basis.elements", "def get_outer_rings(feature_or_geometry):\n mp = Geometry.get_multipolygon(feature_or_geometry)\n return [[t[0]] for t in mp]", "def closest_object(geometries, point): \n min_dist, min_index = min((point.distance(geom), k) \n for (k, geom) in enumerate(geometries))\n \n return geometries[min_index], min_dist, min_index", "def nth(f, *N):\n return dmp_ground_nth(f.rep, N, f.lev, f.dom)", "def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry", "def get_multipolygon(request, location):\n geometries = request.data.get('FeatureCollection', None)\n if geometries is not None:\n geometry_list = []\n for g in geometries['features']:\n if g['geometry']['type'] == 'Point':\n g = point_to_polygon_geojson(g)\n geometry_list.append(GEOSGeometry(json.dumps(g['geometry'])))\n lng, lat = location['Longitude']['Value'], location['Latitude']['Value']\n if lat is not None and lng is not None:\n loc_point = {'type': 'Feature', 'properties': {}, 'geometry': {'type': 'Point', 'coordinates': [lng, lat]}}\n loc_polygon = point_to_polygon_geojson(loc_point)\n geometry_list.append(GEOSGeometry(json.dumps(loc_polygon['geometry'])))\n return MultiPolygon(geometry_list)\n return None", "def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom", "def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin\n return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)", "def geometry(rdm):\n rdm = _rd_chem.AddHs(rdm)\n atms = rdm.GetAtoms()\n natms = len(rdm.GetAtoms())\n if natms == 1:\n asb = atms[0].GetSymbol()\n xyz = (0., 0., 0.)\n geo = ((asb, xyz),)\n else:\n _rd_all_chem.EmbedMolecule(rdm)\n _rd_all_chem.MMFFOptimizeMolecule(rdm)\n asbs = tuple(rda.GetSymbol() for rda in 
atms)\n xyzs = tuple(map(tuple, rdm.GetConformer(0).GetPositions()))\n geo = tuple(zip(asbs, xyzs))\n return geo", "def getelem(self,num):\n #return self.M.conf()['elements'][num]\n return self.lat[num]", "def geometry(self, objectId):\n\n objectId = GeometryReference(objectId, self)\n req = urllib2.Request(self.baseUri + 'geometry/%d' % objectId.id)\n r = urllib2.urlopen(req)\n\n data = json.load(r)\n r.close()\n return data", "def get_grid(self, n):\n\n grid_arrangement = self.get_grid_arrangement(n)\n return self.get_gridspec(grid_arrangement)", "def geomFromInteriorPoints(coords):\n if isinstance(coords, numpy.ndarray):\n coords = coords.tolist()\n geomDict = {'type':'MultiPoint', 'coordinates':coords}\n geomPoints = ogr.CreateGeometryFromJson(repr(geomDict))\n return geomPoints", "def getGeometry(self):\n return self.geometry", "def getGeometry(self):\n return self.geometry", "def p1(self):\n return tuple(self.rect[:2])", "def get_slice(self, n):\n if n == 0:\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)\n raise IndexError(f\"{n} is invalid for a 1 dimension Slice \")", "def getGroup(self, index):\n index = int(index)\n if index < 0:\n return self.top_group1\n elif index > (self.layers - 1):\n index = (self.layers - 1)\n return self.groups[index]", "def firstThree(catalog):\n return model.firstThree(catalog)", "def nth(iterable, index):\n return next(itertools.islice(iterable, index, None))", "def get_nth_user_gate(self, index):\n return self.get_g_sect()[index]", "def get_single_medium(scene):\n shapes = scene.shapes()\n assert len(shapes) == 1, f'Not supported: more than 1 shape in the scene (found {len(shapes)}).'\n medium = shapes[0].interior_medium()\n assert medium is not None, 'Expected a single shape with an interior medium.'\n return medium" ]
[ "0.65626216", "0.59952974", "0.5883551", "0.5852187", "0.55755234", "0.5488048", "0.52828753", "0.52417064", "0.5209669", "0.52007324", "0.5179337", "0.5148148", "0.50144815", "0.49956", "0.49947926", "0.49943653", "0.49615914", "0.4940357", "0.4906351", "0.4902437", "0.48999467", "0.48958105", "0.48958105", "0.48653057", "0.48512548", "0.4847388", "0.4844068", "0.48370996", "0.4833812", "0.482493" ]
0.64696354
1
Get the type of a geometry. Returns StringValue String representing the type of `self`.
def geometry_type(self) -> ir.StringValue: return ops.GeoGeometryType(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_geometry_type(self):\n return self.geometry_type", "def get_geometry_type(self):\n return self._geometry_type", "def geom_type(self):\n return _property_op(arctern.ST_GeometryType, self)", "def geom_type(self): # -> str:\n ...", "def GetObjectTypeString(type):\n return _gmat_py.GmatBase_GetObjectTypeString(type)", "def get_type(self) -> str:\n return self.type", "def get_type(self) -> str:\n raise NotImplementedError", "def get_type(self) -> str:\n raise NotImplementedError", "def GmatBase_GetObjectTypeString(type):\n return _gmat_py.GmatBase_GetObjectTypeString(type)", "def getGeometryType(restGeom):\n if \"Polygon\" in restGeom:\n return \"POLYGON\"\n elif \"Polyline\" in restGeom:\n return \"POLYLINE\"\n elif \"Point\" in restGeom:\n return \"POINT\"\n else:\n return \"Unknown\"", "def get_type(self) -> TypeStr:\n return TYPE.inverse[self.type()]", "def type_as_string(self):\n return self.properties.get('TypeAsString', None)", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Type\"]", "def type(self) -> str:\n return self._type", "def type(self) -> str:\n return self._type", "def type(self) -> str:\n return self._type", "def type(self) -> str:\n return self.type_", "def type(self) -> str:\n\n return self._type", "def geometry_type(number):\n try:\n return GDAL_GEOMETRY_TYPES[number]\n except KeyError:\n return", "def get_type(self) -> str:\n return self.row_dict['type']", "def type(self):\n if self._type is None:\n self._type = repr(self.t)\n return self._type", "def type_name(self) -> str: # pragma: no cover\n return repr_type(self.type_obj)", "def get_type (self):\n return self._stype", "def type_name(self):\n return self._type_name", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]", "def get_type(self):\n return self.type", "def get_type(self):\n return self.type", "def get_type(self):\n return self._type", "def get_type(self):\n return self._type", "def type(self):\n return self._getValue('type')" ]
[ "0.76413125", "0.758172", "0.7331979", "0.711782", "0.69186944", "0.6886925", "0.6850519", "0.6850519", "0.68425244", "0.68053585", "0.67596257", "0.6710633", "0.6700815", "0.6681921", "0.6681921", "0.6681921", "0.66299194", "0.6629663", "0.6625415", "0.6621467", "0.65781057", "0.6572099", "0.64119154", "0.6394997", "0.63894194", "0.6364565", "0.6364565", "0.63623244", "0.63623244", "0.6361028" ]
0.82591885
0
Compute the distance between two geospatial expressions.
def distance(self, right: GeoSpatialValue) -> ir.FloatingValue: return ops.GeoDistance(self, right).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km", "def distance(self, a, b):\n \n # -----------------------------\n # Your code\n '''R = 3963 # radius of Earth (miles)\n lat1, lon1 = math.radians(a[0]), math.radians(a[1])\n lat2, lon2 = math.radians(b[0]), math.radians(b[1])\n \n return math.acos(math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R*0.000621371'''\n return abs(a[0] - b[0]) + abs(a[1] - b[1])\n \n \n # -----------------------------", "def distance(self, coord1, coord2):\n\n delta_x = self.delta_long_miles(coord1.lat, coord1.delta_long(coord2))\n delta_y = self.delta_lat_miles(coord1.delta_lat(coord2))\n\n return self.cartesian_dist(delta_x, delta_y)", "def nn_distance(xyz1, xyz2):\n return _op_library.nn_distance(xyz1, xyz2)", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def distance(coords1, coords2):\n dx = coords1.x - coords2.x\n dy = coords1.y - coords2.y\n return math.sqrt(dx * dx + dy * dy)", "def distance(self, other):\n return _binary_op(arctern.ST_Distance, self, other)", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def geodesicDistance(A, B = geolocate(\"Colosseo\")):\n # colosseo = (41.890183, 12.492369)\n return geopy.distance.vincenty(A, B).meters", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def _distance(coord1, coord2):\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist*xdist + ydist*ydist)", "def distance(self, a, b):\n raise NotImplementedError()", "def CalculateDistance(q1, q2):\r\n return np.sqrt((q1[0] - q2[0])**2 + (q1[1] - q2[1])**2)", "def _Dist(coordinates1, coordinates2):\n if len(coordinates2) == 1:\n return math.abs(coordinates2[0] - coordinates1[0])\n elif len(coordinates2) == 2:\n return math.sqrt(\n math.pow(\n coordinates2[0] - coordinates1[0], 2) \n + math.pow(coordinates2[1] - coordinates1[1], 2)\n )\n else:\n return distance.euclidean(coordinates2, coordinates1)", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)", "def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))", "def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)", "def calc_dist(c1: Coordinates, c2: Coordinates = None) -> float:\n\t\n\t# Get distances for each dimension in a common unit, meters.\n\tlat_dist = (c1.lat - c2.lat) * LAT_RATIO\n\tlong_dist = (c1.lon - c2.lon) * LONG_RATIO\n\treturn math.sqrt(lat_dist**2 + long_dist**2)", "def distance(xy1, xy2):\n x_dist = xy2[0] - xy1[0]\n y_dist = xy2[1] - xy1[1]\n dist = np.sqrt(x_dist ** 2 + y_dist ** 2)\n return dist", "def 
distance(latitude_1: float, longitude_1: float, latitude_2: float, longitude_2: float) -> float:\n lat1, lon1, lat2, lon2 = map(radians, (latitude_1, longitude_1, latitude_2, longitude_2))\n return (\n 2\n * EARTH_RADIUS\n * asin(\n sqrt(\n sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * (sin((lon2 - lon1) / 2) ** 2)\n )\n )\n )", "def gpx_distance(lat1, lon1, lat2, lon2):\n theta = lon1 - lon2\n rads = sin(radians(lat1)) * sin(radians(lat2)) + cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(theta))\n\n # make sure rads is [-1, 1]\n rads = 1 if rads > 1 else rads\n rads = -1 if rads < -1 else rads\n\n rads = acos(rads)\n\n # multiply by radius of the earth to get distance\n return rads * 6367", "def distance_checker(xyz1, xyz2):\n return math.sqrt((xyz1[0] - xyz2[0])**2 + (xyz1[1] - xyz2[1])**2 +\n (xyz1[2] - xyz2[2])**2)", "def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)", "def calculate_distance(coords1, coords2):# 3 sets of double quotes allows to give a description for help\n distance_x = coords1[0] - coords2[0]\n distance_y = coords1[1] - coords2[1]\n distance_z = coords1[2] - coords2[2]\n distance = numpy.sqrt(distance_x**2 + distance_y**2 + distance_z**2)\n return distance", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance" ]
[ "0.67492944", "0.67315775", "0.6720346", "0.66882116", "0.66505593", "0.6646785", "0.66209525", "0.6602943", "0.6596843", "0.65880513", "0.6574677", "0.6560092", "0.6550573", "0.6542965", "0.6534998", "0.65274507", "0.6516017", "0.64990103", "0.6498941", "0.648396", "0.647728", "0.6465703", "0.6447616", "0.6435698", "0.64354604", "0.6432054", "0.64048636", "0.6399925", "0.63961303", "0.6387629" ]
0.6739062
1
Compute the length of a geospatial expression. Returns FloatingValue Length of `self`
def length(self) -> ir.FloatingValue: return ops.GeoLength(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n return _property_op(arctern.ST_Length, self)", "def getLength(self):\n return self.geometry.length", "def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)", "def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)", "def length(self):\n return math.sqrt(self.x * self.x + self.y * self.y)", "def length(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def length(self) -> ir.IntegerValue:\n return ops.MapLength(self).to_expr()", "def Length(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def length(self):\r\n\r\n return math.sqrt(self*self)", "def get_length(self) -> np.float64:\n\n return np.float64(\n sqrt(\n (self.node1.x - self.node2.x) ** 2\n + (self.node1.y - self.node2.y) ** 2\n )\n )", "def getLength(self):\n flength = 0\n for quad in self._quadrilaterals:\n flength = flength + get_quad_length(quad)\n return flength", "def lenght(self):\n from math import sqrt\n\n #nicer notation to make it easier to read.\n\n a, b = self.x, self.y\n\n return sqrt(a**2 + b**2)", "def length(self):\n return np.sqrt(np.sum(self**2, axis=1)).view(np.ndarray)", "def Length(self):\n xyza = self.ga_ref.get_position() + self.wa\n xyzb = self.gb_ref.get_position() + self.wb\n if self.gc is not None:\n xyzc = self.gc_ref.get_position() + self.wc\n xa, ya, za = xyza\n length = self._integrate(\n xyza - xa,\n xyzb - ya,\n xyzc - za,\n )\n else:\n length = np.linalg.norm(xyzb - xyza)\n return length", "def length(self) -> npt.NDArray[np.float_]:\n return dist(*self.vertices)", "def _get_length(self):\n from math import sqrt\n\n if self._length is None:\n sum1 = 0\n for a in self.diff:\n sum1 += a * a\n self._length = sqrt(sum1)\n return self._length", "def length(self) -> float:\n return pos.distance(self.start, self.end)", "def get_length_sqrd(self):\n return self.x**2 + self.y**2", "def getLength(self) -> float:\n return self.length", "def getLength(self):\n return self.vector.norm", "def length(self) -> float:\n n = len(self.milestones[0])//2\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = vectorops.madd(cp0,x[n:],third)\n cp3 = y[:n]\n cp2 = vectorops.madd(cp3,y[n:],-third)\n return third*vectorops.norm(x[n:]) + vectorops.distance(cp1,cp2) + third*vectorops.norm(y[n:])\n return Trajectory.length(self,distance)", "def length(self):\n return pyvista.Box(self.bounds).length", "def length(self):\n points = [Point(v, crs=self.crs) for v in self.vertices]\n distances = [a.distance(b) for a, b in zip(points[:-1], points[1:])]\n return sum(distances)", "def length(self):\n if self._length_cache is None:\n cls = type(self)\n func = cls._length_extraction_fn()\n preprocessed_func = cls.preprocess_func(func)\n self._length_cache = self.apply(preprocessed_func)\n return self._length_cache", "def calc_length(self):\n return AtomMath.length(self.atom1.position - self.atom2.position)", "def calculate_length(self):\n raise NotImplementedError", "def length(self):\n return self.get_delta_value(self.Z_INDEX)", "def euclidian_length(self):\n\n if self.get_len() > 1:\n shape_length = 0\n last_x = self.x\n last_y = self.y\n scale = [0]\n for i in range(self.len - 2):\n x = np.array(self.x[i + 1])\n y 
= np.array(self.y[i + 1])\n last_x = np.array(self.x[i])\n last_y = np.array(self.y[i])\n shape_length += np.sqrt((x - last_x) ** 2 + (y - last_y) ** 2)\n scale.append(shape_length)\n return shape_length, scale\n\n else:\n return 0, [0]", "def length(self):\n return _lattice.length(self._accelerator.lattice)", "def length(self):\n return self.length2 ** 0.5" ]
[ "0.759075", "0.75095755", "0.7373766", "0.73544824", "0.7261306", "0.7236807", "0.7201984", "0.70979667", "0.70661324", "0.7026694", "0.6963566", "0.6913003", "0.68597555", "0.684015", "0.6811498", "0.67878556", "0.677738", "0.6695058", "0.66927135", "0.6674483", "0.6637787", "0.65893817", "0.65390253", "0.6538177", "0.6476859", "0.6476053", "0.645567", "0.6419487", "0.639454", "0.63512594" ]
0.8851043
0
Compute the perimeter of a geospatial expression. Returns FloatingValue Perimeter of `self`
def perimeter(self) -> ir.FloatingValue: return ops.GeoPerimeter(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def perimeter(self):\n perimeter = (2 * self.__length) + (2 * self.__width)\n\n return perimeter", "def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)", "def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2", "def perimeter(self):\n\t\treturn 2 * (self.width + self.height)", "def perimeter(self):\r\n return (2*self.width) + (2*self.height)", "def perimeter(self):\n return 2 * (self.height + self.width)", "def perimeter(self):\n\t\treturn self.height * 4", "def perimeter(self):\n return (\n self.side_1_length +\n self.side_2_length +\n self.side_3_length +\n self.side_4_length\n )", "def get_perimeter_formula(cls):\n pass", "def perimeter(self):\n return sum(seg.length for seg in self.segments) + \\\n sum([p.perimeter for p in self.subs])", "def perimeter(self):", "def perimeter(self):\n return sum(self._lengths)", "def perimeter(self):\n return sum(self._lengths)", "def perimeter(self):\n return sum([s.length for s in self.segments])", "def get_perimeter_formula(cls):\n dict_perimieter = {'circle':\"2πr\", 'square':'2a+2b', 'rectangle':'2a+2b',\n 'triangle':'3a',\"equilateral triangle\":'a+b+c',\n 'regular pentagon':\"5a\"}\n for k,v in dict_perimieter.items():\n if cls.__name__ == k:\n return v", "def perimeter(a:float, b:float, c:float):\n return a + b + c", "def PolyPerimeter(Coords):\n peri = 0.0\n for i in range(np.shape(Coords)[0]-1):\n # next point coord - current point coord\n peri = peri + ( (Coords[i+1,0] - Coords[i,0])**2 + (Coords[i+1,1] - Coords[i,1])**2 )**0.5\n\n return peri", "def regular_polygon_area(perimeter, apothem):\n return (perimeter * apothem) / 2", "def perimeter(points):\n return sum(get_distances(points))", "def island_perimeter(grid):\n w = len(grid[0])\n h = len(grid)\n perimeter = 0\n\n for i, col in enumerate(grid):\n for j, row in enumerate(col):\n if row == 1:\n perimeter += 4\n if grid[i][j-1] == 1:\n perimeter -= 1\n if grid[i][(j+1) % w] == 1:\n perimeter -= 1\n if grid[(i+1) % h][j] == 1:\n perimeter -= 1\n if grid[i-1][j] == 1:\n perimeter -= 1\n return perimeter", "def perimeter_distance(self, p1, p2):\n\n p1_projection = self.outline.project(shgeo.Point(p1))\n p2_projection = self.outline.project(shgeo.Point(p2))\n\n distance = p2_projection - p1_projection\n\n if abs(distance) > self.outline_length / 2.0:\n # if we'd have to go more than halfway around, it's faster to go\n # the other way\n if distance < 0:\n return distance + self.outline_length\n elif distance > 0:\n return distance - self.outline_length\n else:\n # this ought not happen, but just for completeness, return 0 if\n # p1 and p0 are the same point\n return 0\n else:\n return distance", "def get_perimeter(self, radius: int = 1) -> set:\n return self.get_neighbourhood(radius) - self.get_neighbourhood(radius - 1)", "def island_perimeter(grid):\n perimeter = 0\n for j in range(len(grid)):\n for i in range(len(grid[j])):\n if grid[j][i] == 1:\n perimeter += 4\n if i is not 0 and grid[j][i - 1] is 1:\n perimeter -= 1\n if j is not 0 and grid[j - 1][i] is 1:\n perimeter -= 1\n if j + 1 < len(grid) and grid[j + 1][i] is 1:\n perimeter -= 1\n if i + 1 < len(grid[j]) and grid[j][i + 1] is 1:\n perimeter -= 1\n return perimeter", "def island_perimeter(grid):\n \"\"\"island_perimeter - perimeter of the island\n Parameter\n ---------\n grid:\n list\n Return\n ------\n int\n \"\"\"\n total = 
0\n\n rows = len(grid)\n columns = len(grid[0])\n\n for row in range(rows):\n for col in range(columns):\n array = grid[row][col]\n if array == 1:\n total += 4\n if row != 0 and grid[row-1][col] == 1:\n total -= 1\n if col != 0 and grid[row][col-1] == 1:\n total -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n total -= 1\n if col + 1 != columns and grid[row][col + 1] == 1:\n total -= 1\n\n return total", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def area(self) -> ir.FloatingValue:\n return ops.GeoArea(self).to_expr()", "def area(self):\n return math.pi*self._radius*self._radius", "def area(self):\n semi_perimeter = self.perimeter() / 2\n area = semi_perimeter\n for l in self._lengths:\n area *= (semi_perimeter - l)\n return float('{:.2f}'.format(area**0.5))" ]
[ "0.778637", "0.77440834", "0.7668872", "0.75241464", "0.74811465", "0.7469736", "0.7424772", "0.7394988", "0.73506325", "0.73477244", "0.71799135", "0.69433486", "0.6926244", "0.685806", "0.685806", "0.6706363", "0.6612634", "0.64354116", "0.63916534", "0.63876265", "0.6308568", "0.62259054", "0.6203108", "0.61890477", "0.6184884", "0.6182203", "0.615136", "0.6135638", "0.6126603", "0.6125356" ]
0.9076304
0
Returns the 2-dimensional max distance between two geometries in projected units. If `self` and `right` are the same geometry the function will return the distance between the two vertices most far from each other in that geometry.
def max_distance(self, right: GeoSpatialValue) -> ir.FloatingValue: return ops.GeoMaxDistance(self, right).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def distance(self, right: GeoSpatialValue) -> ir.FloatingValue:\n return ops.GeoDistance(self, right).to_expr()", "def distance_to(self, other: Geometry[Scalar]) -> Scalar:\n return (self._distance_to_point(other)\n if isinstance(other, Point)\n else (non_negative_min(self._distance_to_point(point)\n for point in other.points)\n if isinstance(other, Multipoint)\n else other.distance_to(self)))", "def dist(self, other):\n\t\tif self.areInside(other):\n\t\t\treturn 0\n\n\t\tif self.overlap(other):\n\t\t\treturn 0\n\t\t\n\t\t# Cases when the tiles are laying parallel to each other\n\t\t# Projection across the x axis\n\t\tif self.startX <= other.startX <= self.endX or\\\n\t\t\tself.startX <= other.endX <= self.endX or \\\n\t\t\tother.startX <= self.startX <= other.endX or \\\n\t\t\tother.startX <= self.endX <= other.endX:\n\t\t\treturn min(abs(self.startY - other.endY), abs(self.endY - other.startY))\n\n\t\t# Projection across the y axis\n\t\tif self.startY <= other.startY <= self.endY or\\\n\t\t\tself.startY <= other.endY <= self.endY or \\\n\t\t\tother.startY <= self.startY <= other.endY or \\\n\t\t\tother.startY <= self.endY <= other.endY:\n\t\t\treturn min(abs(self.startX - other.endX), abs(self.endX - other.startX))\n\t\t\t\t\t\n\t\t# Last case is when the tiles are disjoint from each other\n\t\telse:\t\t\n\t\t\t# Case 1: self is to the right of other\n\t\t\tif other.endX < self.startX:\n\t\t\t\t# Case 1.1: other is above self\n\t\t\t\t# Other is northwest of self\n\t\t\t\tif other.endY < other.startY:\n\t\t\t\t\treturn Box.euclid(self.startX, other.endX, self.startY, other.endY)\n\t\t\t\t# Case 1.2: other is below self\n\t\t\t\treturn Box.euclid(self.startX, other.endX, self.endY, other.startY)\n\n\t\t\t# Case 2: self is to the left of other\n\t\t\telse:\n\t\t\t\t# Case 2.1: other is above self\n\t\t\t\tif other.endY < self.startY:\n\t\t\t\t\treturn Box.euclid(self.endX, other.startX, self.startY, other.endY)\n\t\t\t\t# Case 2.2: other is below self\n\t\t\t\t# Other is southeast of self\n\t\t\t\treturn Box.euclid(self.endX, other.startX, self.endY, other.startY)", "def dist(self, other: \"Vector\", sqr=False) -> float: #distance between 2 vectors\n if sqr:\n return (self-other).sqr_mag()\n return (self-other).mag()", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def distance(self, other, projected=True):\n if projected and not isinstance(self.crs, GeographicalCRS):\n x0, y0 = self.vertex[:2]\n x1, y1 = other.get_vertex(self.crs)[:2]\n dist = math.sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0))\n else:\n lon0, lat0 = self.crs.project(self.x, self.y, inverse=True)\n lon1, lat1 = self.crs.project(other.x, other.y, inverse=True)\n _, _, dist = self.crs.inverse(lon0, lat0, lon1, lat1)\n\n if 2 == len(self.vertex) == len(other.vertex):\n return dist\n else:\n return math.sqrt(dist**2. 
+ (self.z-other.z)**2.)", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def distance(self, other):\n # only used in triangle.__str__\n return hypot(self.x - other.x, self.y - other.y)", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def distance(self, other):\n\n return hypot(self.x - other.x, self.y - other.y)", "def __distance_to(self, other: Any) -> float:\n return np.linalg.norm(self.pos - other.pos)", "def getMaximum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle >= v2.angle:\n return self.p1\n else:\n return self.p2", "def distance(self, other):\n # distance = math.sqrt((self.position.x - other.position.x) ** 2 +\n # (self.position.y - other.position.y) ** 2)\n distance = math.sqrt(sum((self.position - other.position) ** 2))\n return distance", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "async def max_distance(self, *args):\n return await self._rpc.max_distance(*args)", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def get_distance(self, b_a, b_b):\n dx = max(max(0, b_a[1] - b_b[3]), max(0, b_b[1] - b_a[3]))\n dy = max(max(0, b_a[0] - b_b[2]), max(0, b_b[0] - b_a[2]))\n return max(dx, dy)", "def distance_to(self, other):\n ox, oy = other\n return math.hypot(self[0] - ox, self[1] - oy)", "def distance_to(self, other):\n p_self, p_other = self.closest_points(other)\n return np.linalg.norm(p_self - p_other)", "def get_dist_sqrd(self, other):\n return (self.x - other[0])**2 + (self.y - other[1])**2", "def distance(self, other):\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def difference(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoDifference(self, right).to_expr()", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def getDistance_with_wall(self, pos1, pos2):\n if self._distances == None:\n return manhattanDistance(pos1, pos2)\n if isInt(pos1) and isInt(pos2):\n return self.getDistanceOnGrid(pos1, pos2)\n pos1Grids = getGrids2D(pos1)\n pos2Grids = getGrids2D(pos2)\n bestDistance = self.default\n for pos1Snap, snap1Distance in pos1Grids:\n for pos2Snap, snap2Distance in pos2Grids:\n gridDistance = self.getDistanceOnGrid(pos1Snap, pos2Snap)\n distance = gridDistance + snap1Distance + snap2Distance\n if bestDistance > distance:\n bestDistance = distance\n return bestDistance", "def get_length(self) -> np.float64:\n\n return np.float64(\n sqrt(\n (self.node1.x - self.node2.x) ** 2\n + (self.node1.y - self.node2.y) ** 2\n )\n )" ]
[ "0.7138382", "0.69984686", "0.62663174", "0.6188633", "0.61593235", "0.61498183", "0.61449254", "0.6134089", "0.6068625", "0.6057324", "0.6041501", "0.60340476", "0.6026349", "0.60073996", "0.5995334", "0.59545815", "0.5942099", "0.5938405", "0.59302366", "0.5925812", "0.59211236", "0.5920799", "0.5916041", "0.5903178", "0.58970463", "0.58892506", "0.58836234", "0.5848061", "0.58462274", "0.5819303" ]
0.7459676
0
Return the X minima of a geometry. Returns FloatingValue X minima
def x_min(self) -> ir.FloatingValue: return ops.GeoXMin(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def x_min(self):\n return self.get_min_value(self.X_INDEX)", "def getMinX(self):\n return self.minx", "def MinX(*args, **kwargs):\n return _gdi_.DC_MinX(*args, **kwargs)", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def getmin(self):\n\n return self.X", "def minX(self):\n self._updateExtents()\n return self._mMinX", "def xminmax ( self ) :\n return self.xvar.minmax()", "def x(self) -> ir.FloatingValue:\n return ops.GeoX(self).to_expr()", "def min(self) -> \"Stream[float]\":\n return self.agg(np.min).astype(\"float\")", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def minX(self):\n return min(self.getx())", "def get_xmin(self):\n return self.__xmin", "def find_local_min_x(self, Ns=None):\n if Ns is None:\n Ns = self.num\n with self.fix_evaluator():\n params = np.linspace(0, np.pi, Ns)\n dx_func = lambda param: self.diff(param)[0]\n dx = [dx_func(param) for param in params]\n # roots of dx are extrema of x\n roots = find_all_roots(params, dx, func=dx_func)\n if len(roots) < 3: # need at least two maxima and a minimum\n return None\n # take the interior root with smallest x-value\n return min(roots[1:-1], key=lambda param: self(param)[0])", "def xmin(self):\n return asarray([b[0] for b in self.bounds])", "def getXmin(self):\n return min(self.p1.x, self.p2.x)", "def OpenXmin(self, *args):\n return _Bnd.Bnd_Box_OpenXmin(self, *args)", "def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx", "def get_f_minimum(self):\n return np.min(self._Y)", "def get_fmin(self):\n return self.model.predict(self.model.X)[0].min()", "def pmin(\n *x: NumericType,\n na_rm: bool = False\n) -> Iterable[float]:\n maxlen = max(map(length_of, x))\n x = (recycle_value(elem, maxlen) for elem in x)\n return Array([min(elem, na_rm=na_rm) for elem in zip(*x)])", "def min(self):\n return self._min_coords", "def minimum ( self ,\n xmin = None , xmax = None ,\n ymin = None , ymax = None , x0 = () ) :\n \n if xmin is None : xmin = self.xminmax()[0]\n if xmax is None : xmax = self.xminmax()[1]\n if self.xminmax() :\n xmin = max ( xmin , self.xminmax()[0] )\n xmax = min ( xmax , self.xminmax()[1] )\n\n if ymin is None : ymin = self.yminmax()[0]\n if ymax is None : ymax = self.yminmax()[1]\n if self.yminmax() :\n ymin = max ( ymin , self.yminmax()[0] )\n ymax = min ( ymax , self.yminmax()[1] )\n \n if not x0 : x0 = 0.5 * ( xmin + xmax ) , 0.5 * ( ymin + ymax )\n \n if not xmin <= x0[0] <= xmax :\n self.error(\"Wrong xmin/x0[0]/xmax: %s/%s/%s\" % ( xmin , x0[0] , xmax ) )\n\n if not ymin <= x0[1] <= ymax : \n self.error(\"Wrong ymin/x0[1]/ymax: %s/%s/%s\" % ( ymin , x0[1] , ymax ) )\n \n from ostap.math.minimize import sp_minimum_2D\n return sp_minimum_2D ( self ,\n xmin , xmax ,\n ymin , ymax , x0 
)", "def get_xmin(self, start, hours, param):\n\n # Process variable\n data = self.get_hour_data(start, param)\n for hour in range(1, hours):\n try:\n data = np.amin([data, self.get_hour_data(start + hour, param)],\n axis=0)\n except ValueError as e:\n continue\n return data", "def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]", "def get_minimum():\n return [\n convert_variables([0.78547, 0.78547, 0.78547]),\n ]", "def OpenXmin(self, *args):\n return _Bnd.Bnd_Box2d_OpenXmin(self, *args)", "def min(*x, na_rm: bool = False) -> Any:\n fun = numpy.nanmin if na_rm else numpy.min\n x = Collection(*x) # flatten\n return fun(x)", "def xmin(self):\n return self.bbox[0][0]", "def min(self):\n return numpy.ma.min(self.data)", "def min_x_arg(self):\n return self.T.min_y_arg" ]
[ "0.6936281", "0.68577117", "0.6676654", "0.65368813", "0.65021634", "0.64860624", "0.64765996", "0.6459425", "0.6271408", "0.62344307", "0.62138295", "0.62108153", "0.6197507", "0.6180147", "0.6161268", "0.60633", "0.6042033", "0.59815675", "0.59366107", "0.58715415", "0.58484024", "0.5847271", "0.584578", "0.58194506", "0.58072156", "0.5750295", "0.5712679", "0.57032543", "0.5699487", "0.5689569" ]
0.75810975
0
Return the X maxima of a geometry. Returns FloatingValue X maxima
def x_max(self) -> ir.FloatingValue: return ops.GeoXMax(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def x_max(self):\n return self.get_max_value(self.X_INDEX)", "def xminmax ( self ) :\n return self.xvar.minmax()", "def getMaxX(self):\n return self.maxx", "def maxx(self):\n return self.__maxx", "def MaxX(*args, **kwargs):\n return _gdi_.DC_MaxX(*args, **kwargs)", "def maxX(self):\n return max(self.getx())", "def OpenXmax(self, *args):\n return _Bnd.Bnd_Box_OpenXmax(self, *args)", "def getXmax(self):\n return max(self.p1.x, self.p2.x)", "def getMaxima(x, y):\n# mx_x = (np.abs(np.min(x)) + np.max(x)) / 2\n# mx_y = (np.abs(np.min(y)) + np.max(y)) / 2\n# \n mx_x = np.max(x)\n mx_y = np.max(y)\n return mx_x, mx_y", "def get_max_x(self) -> float:\n return self.pendulum2.get_max_x()", "def x(self) -> ir.FloatingValue:\n return ops.GeoX(self).to_expr()", "def find_x_max(self, persistence, inf_delta=0.1, band=0.0):\n\n (min_birth, max_death) = self.__min_birth_max_death(persistence, band)\n delta = (max_death - min_birth) * inf_delta\n x_max = max_death + delta\n return x_max", "def maxX(self):\n self._updateExtents()\n return self._mMaxX", "def OpenXmax(self, *args):\n return _Bnd.Bnd_Box2d_OpenXmax(self, *args)", "def compute_max(self):\r\n self.x_max = self.ox + self.dx*self.nx\r\n self.y_max = self.oy + self.dy*self.ny\r\n self.z_max = self.oz + self.dz*self.nz", "def max_point(self):\n x = self.max(0).idxmax()\n y = self.loc[:, x].idxmax()\n return x, y", "def _get_extremes(self, attr='values'):\n # calculate the maximum and minimum for all series\n series_max = [0]\n series_min = [0]\n for s in self:\n if s is not None:\n series_max.append(s.max(attr))\n series_min.append(s.min(attr))\n return min(series_min), max(series_max)", "def y_max(self) -> ir.FloatingValue:\n return ops.GeoYMax(self).to_expr()", "def max_x_arg(self):\n return self.T.max_y_arg", "def max(self) -> \"Stream[float]\":\n return self.agg(np.max).astype(\"float\")", "def get_min_max_x(self, min_x = 1e9, max_x = -1e9, exclude = []): \n \n if self.verbose > 1:\n print(\"MultiLinearSpectra.get_min_max_x()\") \n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n min_x, max_x = self.mess[m][\"object\"].get_min_max_x(min_x, max_x)\n \n return min_x, max_x", "def OpenZmax(self, *args):\n return _Bnd.Bnd_Box_OpenZmax(self, *args)", "def get_minmax(self):\n x_minmax = [np.min(self.grid['x']), np.max(self.grid['x'].max())]\n z_minmax = [np.min(self.grid['z']), np.max(self.grid['z'].max())]\n return x_minmax, z_minmax", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def xmax(self):\n return asarray([b[1] for b in self.bounds])", "def maximum ( self , xmin = None , xmax = None , x0 = None ) :\n if xmin is None : xmin = self.xminmax()[0]\n if xmax is None : xmax = self.xminmax()[1]\n if self.xminmax() :\n xmin = max ( xmin , self.xminmax()[0] )\n xmax = min ( xmax , self.xminmax()[1] )\n\n if ymin is None : ymin = self.yminmax()[0]\n if ymax is None : ymax = self.yminmax()[1]\n if self.yminmax() :\n ymin = max ( ymin , self.yminmax()[0] )\n ymax = min ( ymax , self.yminmax()[1] )\n \n if not x0 : x0 = 0.5 * ( xmin + xmax ) , 0.5 * ( ymin + ymax )\n\n if not xmin <= x0[0] <= xmax :\n self.error(\"Wrong xmin/x0[0]/xmax: %s/%s/%s\" % ( xmin , x0[0] , xmax ) )\n\n if not ymin <= x0[1] <= ymax : \n self.error(\"Wrong ymin/x0[1]/ymax: %s/%s/%s\" % ( ymin , x0[1] , ymax ) )\n\n from ostap.math.minimize import sp_maximum_2D\n return sp_maximum_2D ( 
self ,\n xmin , xmax ,\n ymin , ymax , x0 )", "def get_minx_maxx(self, normalized=True):\n minx = np.array([[0.0] * len(self.encoded_feature_names)])\n maxx = np.array([[1.0] * len(self.encoded_feature_names)])\n\n for idx, feature_name in enumerate(self.continuous_feature_names):\n max_value = self.train_df[feature_name].max()\n min_value = self.train_df[feature_name].min()\n\n if normalized:\n minx[0][idx] = (self.permitted_range[feature_name]\n [0] - min_value) / (max_value - min_value)\n maxx[0][idx] = (self.permitted_range[feature_name]\n [1] - min_value) / (max_value - min_value)\n else:\n minx[0][idx] = self.permitted_range[feature_name][0]\n maxx[0][idx] = self.permitted_range[feature_name][1]\n return minx, maxx", "def zminmax ( self ) :\n return self.zvar.minmax()", "def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax", "def x_min(self) -> ir.FloatingValue:\n return ops.GeoXMin(self).to_expr()" ]
[ "0.70034647", "0.6844462", "0.68119204", "0.6744491", "0.66253954", "0.65137553", "0.6445284", "0.63773924", "0.6355037", "0.6352898", "0.634081", "0.6320554", "0.6241515", "0.61676", "0.60805666", "0.60124505", "0.59927404", "0.59921575", "0.5976097", "0.5970284", "0.59668744", "0.593871", "0.5922835", "0.5918915", "0.590377", "0.59001744", "0.58882904", "0.5883043", "0.58824676", "0.58779997" ]
0.774601
0
Return the Y minima of a geometry. Returns FloatingValue Y minima
def y_min(self) -> ir.FloatingValue: return ops.GeoYMin(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yminmax ( self ) :\n return self.yvar.minmax()", "def argminY( self ):\n min = 1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min: min = p[1]\n return min", "def getMinY(self):\n return self.miny", "def get_f_minimum(self):\n return np.min(self._Y)", "def y_min(self):\n return self.get_min_value(self.Y_INDEX)", "def get_y_min(self):\n if len(self._statDict) == 0:\n return 1E10\n\n line_id_list = self._statDict.keys()\n min_y = self._statDict[line_id_list[0]][2]\n for i_plot in range(1, len(line_id_list)):\n if self._statDict[line_id_list[i_plot]][2] < min_y:\n min_y = self._statDict[line_id_list[i_plot]][2]\n\n return min_y", "def get_min_max(self):\n\n mr = np.sqrt(2 * np.log(1/self.mth)) * self.ms\n mr[:] = np.max(mr)\n\n mxmin = self.mx - mr\n mxmax = self.mx + mr\n mymin = self.my - mr\n mymax = self.my + mr\n mzmin = self.mz - mr\n mzmax = self.mz + mr\n\n mb_xmin_idx = np.argmin(mxmin[self.ma > 0])\n mb_xmax_idx = np.argmax(mxmax[self.ma > 0])\n mb_ymin_idx = np.argmin(mymin[self.ma > 0])\n mb_ymax_idx = np.argmax(mymax[self.ma > 0])\n mb_zmin_idx = np.argmin(mzmin[self.ma > 0])\n mb_zmax_idx = np.argmax(mzmax[self.ma > 0])\n\n xmin0 = self.mx[mb_xmin_idx] - mr[mb_xmin_idx]\n xmax0 = self.mx[mb_xmax_idx] + mr[mb_xmax_idx]\n ymin0 = self.my[mb_ymin_idx] - mr[mb_ymin_idx]\n ymax0 = self.my[mb_ymax_idx] + mr[mb_ymax_idx]\n zmin0 = self.mz[mb_zmin_idx] - mr[mb_zmin_idx]\n zmax0 = self.mz[mb_zmax_idx] + mr[mb_zmax_idx]\n\n xmin = xmin0 - (xmax0 - xmin0) * 0.25\n xmax = xmax0 + (xmax0 - xmin0) * 0.25\n ymin = ymin0 - (ymax0 - ymin0) * 0.25\n ymax = ymax0 + (ymax0 - ymin0) * 0.25\n zmin = zmin0 - (zmax0 - zmin0) * 0.25\n zmax = zmax0 + (zmax0 - zmin0) * 0.25\n\n return xmin, xmax, ymin, ymax, zmin, zmax", "def MinY(*args, **kwargs):\n return _gdi_.DC_MinY(*args, **kwargs)", "def min_y_arg(self):\n return min((self(0).y,0), (self(1).y,1))[1]", "def y(self) -> ir.FloatingValue:\n return ops.GeoY(self).to_expr()", "def minY(self):\n self._updateExtents()\n return self._mMinY", "def getYmin(self):\n return min(self.p1.y, self.p2.y)", "def getMaxima(x, y):\n# mx_x = (np.abs(np.min(x)) + np.max(x)) / 2\n# mx_y = (np.abs(np.min(y)) + np.max(y)) / 2\n# \n mx_x = np.max(x)\n mx_y = np.max(y)\n return mx_x, mx_y", "def y_max(self) -> ir.FloatingValue:\n return ops.GeoYMax(self).to_expr()", "def get_y_lims(ax, xlims):\n # Assuming that all objects have the same x coordinates\n x = ax.lines[0].get_data()[0]\n\n indexes = get_interval(x, xlims[0], xlims[1])\n xmax = x[indexes[-1]]\n xmin = x[indexes[0]]\n\n ymax_array = []\n ymin_array = []\n\n for function in ax.lines:\n y = function.get_data()[1]\n\n ymin_array.append(np.min(y[indexes]))\n ymax_array.append(np.max(y[indexes]))\n\n ymax = max(ymax_array)\n ymin = min(ymin_array)\n\n return xmin, xmax, ymin, ymax", "def minY(self):\n return min(self.gety())", "def getYF(self):\r\n return self.yFus;", "def fl_get_positioner_ybounds(ptr_flobject):\n _fl_get_positioner_ybounds = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_positioner_ybounds\",\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.POINTER(cty.c_double),\n cty.POINTER(cty.c_double)],\n \"\"\"void fl_get_positioner_ybounds(FL_OBJECT * ob, double * min,\n double * max)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n f_minbound, ptr_minbound = library.make_doublec_and_pointer()\n f_maxbound, ptr_maxbound = library.make_doublec_and_pointer()\n 
library.keep_elem_refs(ptr_flobject, f_minbound, f_maxbound, \\\n ptr_minbound, ptr_maxbound)\n _fl_get_positioner_ybounds(ptr_flobject, ptr_minbound, ptr_maxbound)\n return f_minbound.value, f_maxbound.value", "def y(self):\n return np.sum(self.bbox, 0)[1] / 2", "def y(self) -> float:\n return self.data[1]", "def getMinMax(self,arr):\n minz=arr['zmg']-arr['sigma_pz']*5\n dmin=self.zcat-5*self.sigmacat\n minz[np.where(minz>dmin)]=dmin\n maxz=arr['zmg']+arr['sigma_pz']*5\n dax=self.zcat+5*self.sigmacat\n maxz[np.where(maxz<dmax)]=dmax\n return dmin,dmax", "def xycurves_read_y(self) -> float:\n return float(self.dss_obj.XYCurvesF(ctypes.c_int32(2), ctypes.c_double(0)))", "def OpenYmin(self, *args):\n return _Bnd.Bnd_Box_OpenYmin(self, *args)", "def zminmax ( self ) :\n return self.zvar.minmax()", "def OpenYmin(self, *args):\n return _Bnd.Bnd_Box2d_OpenYmin(self, *args)", "def y_axis_left_min(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"y_axis_left_min\")", "def y_axis_left_min(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"y_axis_left_min\")", "def y_axis_left_min(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"y_axis_left_min\")", "def y_axis_left_min(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"y_axis_left_min\")", "def y_axis_left_min(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"y_axis_left_min\")" ]
[ "0.66017616", "0.6561854", "0.6507894", "0.6417266", "0.6388026", "0.6362979", "0.6347971", "0.6301745", "0.6212872", "0.6202674", "0.615909", "0.61345196", "0.61152333", "0.61130464", "0.6020709", "0.5908351", "0.579901", "0.57950985", "0.57829785", "0.5726", "0.5721702", "0.5708202", "0.56918573", "0.568105", "0.5653993", "0.56459075", "0.56459075", "0.56459075", "0.56459075", "0.56459075" ]
0.7229437
0
Return the Y maxima of a geometry. Returns FloatingValue Y maxima
def y_max(self) -> ir.FloatingValue: return ops.GeoYMax(self).to_expr()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MaxY(*args, **kwargs):\n return _gdi_.DC_MaxY(*args, **kwargs)", "def y_max(self):\n return self.get_max_value(self.Y_INDEX)", "def getMaxY(self):\n return self.maxy", "def get_y_max(self):\n if len(self._statDict) == 0:\n return -1E10\n\n line_id_list = self._statDict.keys()\n max_y = self._statDict[line_id_list[0]][3]\n for i_plot in range(1, len(line_id_list)):\n if self._statDict[line_id_list[i_plot]][3] > max_y:\n max_y = self._statDict[line_id_list[i_plot]][3]\n\n return max_y", "def max_y_arg(self):\n return max((self(0).y,0), (self(1).y,1))[1]", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def get_max_y(self) -> float:\n return self.pendulum2.get_max_y()", "def yminmax ( self ) :\n return self.yvar.minmax()", "def getYmax(self):\n return max(self.p1.y, self.p2.y)", "def getMaxima(x, y):\n# mx_x = (np.abs(np.min(x)) + np.max(x)) / 2\n# mx_y = (np.abs(np.min(y)) + np.max(y)) / 2\n# \n mx_x = np.max(x)\n mx_y = np.max(y)\n return mx_x, mx_y", "def OpenYmax(self, *args):\n return _Bnd.Bnd_Box2d_OpenYmax(self, *args)", "def maxY(self):\n self._updateExtents()\n return self._mMaxY", "def max(self):\n max_i = np.nanargmax(self.ys)\n return self.xs[max_i], self.ys[max_i]", "def OpenYmax(self, *args):\n return _Bnd.Bnd_Box_OpenYmax(self, *args)", "def y(self) -> ir.FloatingValue:\n return ops.GeoY(self).to_expr()", "def find_max_f():\n fmax = fmin(g, 2)\n return fmax[0]", "def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])", "def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])", "def get_ylim(self):\n if isinstance(self._frame, root.TH1F):\n return (self._frame.GetMinimum(), self._frame.GetMaximum())\n else:\n return (self._frame.GetYaxis().GetXmin(), self._frame.GetYaxis().GetXmax())", "def get_max_coordinates(self) -> float:\n return self.pendulum2.get_max_coordinates()", "def x_max(self) -> ir.FloatingValue:\n return ops.GeoXMax(self).to_expr()", "def maxY(self):\n return max(self.gety())", "def max(self):\n return self._reduce_for_stat_function(F.max, only_numeric=False)", "def compute_max(self):\r\n self.x_max = self.ox + self.dx*self.nx\r\n self.y_max = self.oy + self.dy*self.ny\r\n self.z_max = self.oz + self.dz*self.nz", "def absmax(self):\n raise NotImplementedError", "def f_max(cls):\n return cls.params[\"f_max\"]", "def zmax(self):\n return self._zi", "def getYF(self):\r\n return self.yFus;", "def globalMaximum(self):\n # The global maximum is at one peak's position\n potential_max = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n potential_max.append((func(pos, pos, height, width), pos))\n return max(potential_max)", "def max_value(self) -> Union[int, float]:\n return self.right_boundary['value']" ]
[ "0.7133755", "0.7071627", "0.6952557", "0.69172186", "0.68478966", "0.67792916", "0.6676045", "0.6672287", "0.66261244", "0.66043246", "0.65261805", "0.6523265", "0.65193427", "0.6452775", "0.6387825", "0.6349131", "0.6341176", "0.6341176", "0.6329433", "0.63072056", "0.62842774", "0.62676734", "0.62588805", "0.62327534", "0.619897", "0.616121", "0.6146771", "0.6122814", "0.6092457", "0.60782653" ]
0.79806757
0