| column | dtype | stats |
| --- | --- | --- |
| query | string | length 9 to 9.05k |
| document | string | length 10 to 222k |
| metadata | dict | |
| negatives | sequence | length 30 (fixed) |
| negative_scores | sequence | length 30 (fixed) |
| document_score | string | length 4 to 10 |
| document_rank | string | 2 classes |
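Each record can be pulled straight from the Hub with the `datasets` library. The snippet below is a minimal sketch; the repository id is a placeholder, since the actual dataset path is not shown in this excerpt.

    from datasets import load_dataset

    # Hypothetical repo id -- substitute the real dataset path.
    ds = load_dataset("user/code-search-triplets", split="train")

    row = ds[0]
    print(row["query"])                # natural-language description of the target code
    print(row["document"])             # the positive (matching) code snippet
    print(len(row["negatives"]))       # 30 hard-negative snippets
    print(row["negative_scores"][:3])  # retrieval scores for the first few negatives
    print(row["document_score"], row["document_rank"])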
samples a set of unique integers with length size from the interval \[0,max\]
    def unique_sample_of_int(max,size):
        idxs=set()
        num_left = size - len(idxs)
        while num_left > 0:
            idxs = idxs.union(set(np.random.random_integers(0,max,size=num_left)))
            num_left = size - len(idxs)
        return idxs
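As an aside on the snippet above: `np.random.random_integers` has long been deprecated in NumPy. A sketch of the same contract (unique integers drawn from the inclusive interval [0, max]) using the modern `Generator` API — the function name here is only an illustration, not part of the dataset:

    import numpy as np

    def unique_sample_of_int_modern(max_value, size):
        # Requires size <= max_value + 1, otherwise the loop cannot terminate.
        rng = np.random.default_rng()
        idxs = set()
        while len(idxs) < size:
            remaining = size - len(idxs)
            idxs.update(rng.integers(0, max_value, size=remaining, endpoint=True).tolist())
        return idxs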
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_testdata(N: int, min_value: int, max_value: int) -> list:\r\n numbers = set([])\r\n while len(numbers) < N:\r\n random = randint(min_value, max_value)\r\n numbers.add(random)\r\n return list(numbers)", "def choose_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n sample=random.sample(li,n_items) # Should it be sorted?\n return sample", "def genRandomIntListWithinRange(size, minLim, maxLim):\n\tvalues = set()\n\tfor i in range(size):\n\t\tval = randint(minLim, maxLim)\n\t\twhile val not in values:\n\t\t\tvalues.add(val)\n\treturn list(values)", "def individual(length, min, max):\r\n return [ randint(min, max) for x in range(length) ]", "def list_random_sample_numbers(min: int, max: int, length: int) -> List:\r\n result = random.sample(range(min, max), length)\r\n return result", "def initial_sampling(y):\n samples = list(np.random.randint(0, len(y), 2))\n while len(np.unique(y[samples] > 0.5)) != 2:\n samples = list(np.random.randint(0, len(y), 2))\n return samples", "def random_sampling(total_nums: int, samples_needed: int, start_num=1):\n target = np.arange(start_num, total_nums + 1)\n np.random.shuffle(target)\n\n return target[:samples_needed]", "def sample_distrib(d_t, data_set, set_size, max_wanted_samples):\n # when distrib is updated it seems some distrib[i] are updated too much so max is really high and\n training_samples = []\n # distrib_mean = np.mean(d_t)\n distrib_max = max(d_t)\n for _ in range(max_wanted_samples):\n id = rd.randint(0, set_size)\n sample = data_set[id]\n # v = rd.uniform(0, distrib_mean)\n v = rd.uniform(0, distrib_max)\n if d_t[id] > v:\n training_samples.append(sample)\n return training_samples", "def choose_ordered_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n indices = list(range(len(li)))\n sample=random.sample(indices,n_items) # Should it be sorted?\n return [li[i] for i in sorted(sample)]", "def small_sample(num):\n sample = [0] * num\n for i in range(num):\n u = random.randint(0, 3)\n if u == 3:\n sample[i] = -1\n if u == 2:\n sample[i] = 1\n return sample", "def get_unsorted_list(size,MaxN=1000,MinN=0):\n return [random.randint(MinN,MaxN) for i in xrange(size)]", "def list_random_numbers(min: int, max: int, length: int) -> List:\r\n # Many instructions + test condition we can use random.sample()\r\n # See next function 'list_random_sample_numbers()'\r\n result = []\r\n while len(result) < length:\r\n n = randint(min, max)\r\n if n not in result:\r\n result.append(n)\r\n return result", "def pull_n_samples(dset, n):\n return list(dset[i] for i in random.sample(range(len(dset)), n))", "def test_random_small_sample(self):\n pop0 = []\n pop1 = [1]\n popmany = range(10)\n self.assertEqual(set(), random_small_sample(pop0, 0.80))\n self.assertEqual(set(pop1), random_small_sample(pop1, 0.80))\n self.assertEqual(set(popmany), random_small_sample(popmany, 1))\n self.assertEqual(set(pop0), random_small_sample(popmany, 0))\n popmany_50 = random_small_sample(popmany, 0.50)\n self.assertLess(len(popmany_50), len(popmany))\n self.assertGreater(len(popmany_50), 0)", "def get_sampled_ids(self):\n seed = 123\n #initiate two lists, to save randomly picked positive and negative cases respectively\n positiveIds = []\n negativeIds = []\n i = 0\n print \"==> resampling ... 
\",\n while len(positiveIds)+len(negativeIds)<self.ntotal:\n # start a loop from 0 to total size of the new sampe\n # if it catches a number divisable by the sought ratio, update the list of positive cases ids\n # otherwise keep update the list of negative cases ids\n try:\n if i%int(100 / self.posRate) == 0: \n positiveIds.append(self.posId.next())\n else:\n negativeIds.append(self.negId.next())\n except:\n print \"Enter posRate higher than the initial rate\"\n break\n i+=1\n print \"Done sampling\"\n print \"positive:\", len(positiveIds)\n print \"negative:\", len(negativeIds)\n print \"final size:\", len(positiveIds)+len(negativeIds)\n #return sorted list of the two list of ids combined\n return sorted(positiveIds+negativeIds)", "def _sample_n_unique(n, lo, hi, exclude=None):\n batch = np.empty(n, dtype=np.uint32)\n k = 0\n while k < n:\n samples = np.random.randint(low=lo, high=hi, size=n - k)\n samples = np.unique(samples) # Get only the unique entries\n # Get only the entries which are not in exclude\n if exclude is not None:\n valid = np.all(samples[:, None] != exclude, axis=-1)\n # print(\"***\", (samples[:, None] != exclude).shape, valid) # (32, 5)\n samples = samples[valid] # (None,) contains True or False\n # print(\"samples:\", samples)\n # Update batch\n end = min(k + samples.shape[0], n)\n batch[k:end] = samples\n k = end\n return batch", "def random_sampling(elements, n):\r\n import random\r\n return [random.choice(elements) for i in range(n)]", "def uniform_sample(upper, num):\n sample = []\n for i in range(num):\n value = random.randint(0, upper - 1)\n sample.append(value)\n return sample", "def make_random_ints_no_dups(num, lower_bound, upper_bound):\n result = []\n rng = random.Random()\n for i in range(num):\n while True:\n candidate = rng.randrange(lower_bound, upper_bound)\n if candidate not in result:\n break\n result.append(candidate)\n return result", "def get_set(dim, maximum):\n\n i = 0\n numbers = []\n while i**2 <= maximum:\n n = i**2\n counter = 0\n while n <= maximum and counter < dim:\n numbers += [i**2]\n n += i**2\n counter += 1\n i += 1\n return numbers", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def compute_random_subset(values, num_values):\n shuffled = values[:]\n random.shuffle(shuffled)\n return shuffled[:num_values]", "def Samples(n=6, m=1000):\n t = [Sample(n) for i in range(m)]\n return t", "def _sample_schechter(x0, alpha, x_min, size=100, max_iter=1000):\n out = []\n n = 0\n num_iter = 0\n while (n<size) & (num_iter<max_iter):\n x = np.random.gamma(scale=x0, shape=alpha+2, size=size)\n x = x[x>x_min]\n u = np.random.uniform(size=x.size)\n x = x[u<x_min/x]\n out.append(x)\n n+=x.size\n num_iter += 1\n\n if num_iter >= max_iter:\n msg = (\"The maximum number of iterations reached.\",\n \"Random variates may not be representitive.\",\n \"Try increasing `max_iter`.\")\n print(msg)\n\n return np.concatenate(out)[:size]", "def random_ints(count=20, min=1, max=50):\n import random\n return [random.randint(min, max) for _ in range(count)]", "def sample(self, start: typing.Any, num: int) -> typing.List[typing.Any]:\r\n\r\n if num >= len(self.values):\r\n return self.values.copy()\r\n\r\n if start is None:\r\n sample_inds = np.random.choice(len(self.values), (num,), replace=False)\r\n result = []\r\n else:\r\n index_of_start = self.values.index(start)\r\n sample_inds = np.random.choice(len(self.values) - 1, (num - 1,), replace=False)\r\n sample_inds[sample_inds >= index_of_start] += 1\r\n\r\n result = 
[start]\r\n\r\n for ind in sample_inds:\r\n result.append(self.values[ind])\r\n return result", "def individual(min_val, max_val):\n value_list = [i for i in range(min_val, max_val+1)] #generate a list of 1 to 10\n random.shuffle(value_list) #shuffle the list\n return value_list", "def generate_random_data(min_, max_, len_):\n return np.random.uniform(min_, max_, len_)", "def create_random_sample_alt(idx_bins,count_array):\n idxs=[]\n for i,x in enumerate(count_array):\n if x > 0:\n idxs.extend([ idx_bins[i][ind] for ind in unique_sample_of_int(len(idx_bins[i])-1,x) ] )\n return idxs" ]
[ "0.7025099", "0.6946078", "0.6803426", "0.6712467", "0.6691708", "0.65714836", "0.6532559", "0.6503982", "0.64042825", "0.6374597", "0.6359894", "0.63190323", "0.6311178", "0.62729216", "0.62661505", "0.6262865", "0.6239398", "0.62374264", "0.621685", "0.62101054", "0.6199613", "0.6199613", "0.61863935", "0.61716646", "0.6163632", "0.6147477", "0.6129006", "0.6124986", "0.6116567", "0.61082" ]
0.8324977
0
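The `objective` metadata above marks each record as a `(query, document, negatives)` triplet. One possible consumer — shown only as a hedged sketch, with the repo id and base model chosen arbitrarily — is a triplet-loss fine-tune with `sentence-transformers`:

    from torch.utils.data import DataLoader
    from datasets import load_dataset
    from sentence_transformers import SentenceTransformer, InputExample, losses

    ds = load_dataset("user/code-search-triplets", split="train")  # hypothetical repo id, as above
    model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

    # Expand every record into (anchor, positive, negative) triples.
    examples = [
        InputExample(texts=[row["query"], row["document"], neg])
        for row in ds.select(range(1_000))
        for neg in row["negatives"]
    ]

    loader = DataLoader(examples, shuffle=True, batch_size=16)
    model.fit(train_objectives=[(loader, losses.TripletLoss(model))], epochs=1, warmup_steps=100)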
returns set of all geneIDs linked to the SNPs of sig_snp in snp_dict
    def get_sig_gene_set(snp_dict,sig_snp):
        geneIDs=[]
        for chrom in sig_snp.keys():
            for bps in sig_snp[chrom]['bps']:
                idx = snp_dict[chrom]['bps'].searchsorted(bps)
                if (idx < len(snp_dict[chrom]['bps'])) and snp_dict[chrom]['bps'][idx] == bps and snp_dict[chrom]['genes'][idx]:
                    geneIDs.extend(snp_dict[chrom]['genes'][idx])
        return set(geneIDs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_genesets(snp_dict,gene_file):\n inf = open(gene_file,\"r\")\n for i in snp_dict.keys():\n snp_dict[i]['genes']=np.empty(len(snp_dict[i]['bps']), dtype=set)\n for line in inf:\n if re.match(\"\\#\",line):\n continue\n line.rstrip()\n fields=line.split()\n if len(fields) < 3:\n continue\n bps=int(fields[1])\n if fields[0] in snp_dict.keys():\n idx = snp_dict[fields[0]]['bps'].searchsorted(bps)\n if (idx < len(snp_dict[fields[0]]['bps'])) and snp_dict[fields[0]]['bps'][idx] == bps:\n snp_dict[fields[0]]['genes'][idx]=set([ x for x in fields[2:] ])\n return True", "def get_snps(self):\n d = {}\n with open(self.snp_file, 'r') as infile:\n for row in infile:\n if row:\n row_split = row.strip().split('\\t')\n chrom = row_split[0]\n pos = row_split[1]\n name = row_split[3].split('|')\n snp_id = name[0]\n gene = name[1]\n ref_allele = name[2]\n alt_alleles = name[3]\n freq = name[4]\n genome = name[5]\n d[snp_id] = {\n 'chrom': chrom,\n 'pos': pos,\n 'ref': ref_allele,\n 'alt': alt_alleles,\n 'gene': gene,\n 'maf': freq,\n 'genome_build': genome\n }\n return d", "def nsg_ids(self):\n return self._nsg_ids", "def give_active_snp(self):\r\n genome_features = set()\r\n for key,value in self.features_id.items():\r\n if value.type == \"SNP\" and value.attributes.active:\r\n ref_seq = value.attributes.allele[0]\r\n if value.attributes.discovered and not value.attributes.validated:\r\n # snp that have been added to the gff3 not used for designing the primers\r\n # should have been validated before they can be called targets.\r\n # this is a edge case that should never happen in practise.\r\n pass\r\n elif all(len(ref_seq)==len(x) for x in value.attributes.allele):\r\n # if all the alleles are the same length it is a SNP not a indel\r\n genome_features.add(value.attributes.id + '_' + ref_seq)\r\n else:\r\n genome_features.add(value.attributes.id +'_'+ ref_seq + '_indel')\r\n return genome_features", "def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))", "def get_sid_set(sources):\n sid_list = []\n for source_dict in sources:\n sid = source_dict['SID']\n sid_list.append(sid)\n sid_set = set(sid_list)\n\n assert len(sid_set) == len(sid_set), \"Duplicate SID detected\"\n return sid_set", "def get_unique_snps(self):\n\n for chromosome in self.snpsites.keys():\n\n for position in self.snpsites[chromosome].keys():\n for filenumber in range(len(self.vcffilenames)):\n\n if (\n self.snpsites[chromosome][position][filenumber] == True\n and sum(self.snpsites[chromosome][position]) == 1\n ): # First any(array) finds\n self.snp_positions[self.vcffilenames[filenumber]][chromosome][\n position\n ].update({\"unique\": True})\n elif (\n sum(self.snpsites[chromosome][position]) >= 2\n ): # there might be snp at same position but with different alt base\n\n snp_index = [\n i\n for i, j in enumerate(self.snpsites[chromosome][position])\n if j == True\n ]\n\n totalindex = len(snp_index)\n # Lets check the alt base in these vcf files using index\n # lets get array of alt bases from each file\n alt_snps = []\n for index in snp_index:\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][\n chromosome\n ][position][\"alt\"]\n )\n\n # get the counts of the elements\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for index in range(len(counts)):\n if counts[index] == 1:\n # this is unique, so occurred once\n self.snp_positions[self.vcffilenames[snp_index[index]]][\n chromosome\n ][position].update(\n {\"unique\": True}\n ) # 
vcffilenames[snp_index[index]] = this will be the filename\n # print(\"this is unique\", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])\n\n # else:\n # \tvcf_database[\"self.snp_positions\"][chromosome + \"_\" + position].update({\"unique\":False})\n\n return", "def create_bin_indeces(snp_dict,sig_snp):\n for i in sig_snp.keys():\n dig=np.digitize(snp_dict[i]['afs'],sig_snp[i]['afs_hist']['bins'])\n # bin indeces are shifted +1 against histogram count indeces\n dig -= 1\n indx_bins=defaultdict(list)\n for j,x in enumerate(dig):\n indx_bins[x].append(j)\n for j in indx_bins.keys():\n indx_bins[j]=np.array(indx_bins[j])\n snp_dict[i]['bin_idx']=indx_bins\n return True", "def get_splice_signals(introns, fasta_filename):\n sites = {}\n genome = Fasta(fasta_filename)\n for i in introns:\n if i.seqid not in genome:\n sites[i] = 'xxxx'\n else:\n # indices into pyfaidx sequences are zero-based\n sites[i] = genome[i.seqid][(i.start - 1):(i.start + 1)].seq.upper() + genome[i.seqid][(i.end - 2):i.end].seq.upper()\n\n return sites", "def give_deactivated_snp(self):\r\n genome_features = set()\r\n for key,value in self.features_id.items():\r\n if value.type == \"SNP\" and not value.attributes.active:\r\n ref_seq = value.attributes.allele[0]\r\n if all(len(ref_seq) == len(x) for x in value.attributes.allele):\r\n genome_features.add(value.attributes.id + '_' + ref_seq)\r\n else:\r\n genome_features.add(value.attributes.id + '_' + ref_seq + '_indel')\r\n return genome_features", "def get_gene_symbols(self):\n # TODO: could be made much nicer with join in DB via SQL Alchemy\n bins = binning.containing_bins(self.start - 1, self.end)\n gene_intervals = list(\n GeneInterval.objects.filter(\n database=\"ensembl\",\n release=self.release,\n chromosome=self.chromosome,\n bin__in=bins,\n start__lte=self.end,\n end__gte=self.start,\n )\n )\n gene_ids = [itv.gene_id for itv in gene_intervals]\n symbols1 = {\n o.gene_symbol for o in EnsemblToGeneSymbol.objects.filter(ensembl_gene_id__in=gene_ids)\n }\n symbols2 = {o.symbol for o in Hgnc.objects.filter(ensembl_gene_id__in=gene_ids)}\n return sorted(symbols1 | symbols2)", "def get_gene_symbols(self):\n # TODO: could be made much nicer with join in DB via SQL Alchemy\n bins = binning.containing_bins(self.start - 1, self.end)\n gene_intervals = list(\n GeneInterval.objects.filter(\n database=\"ensembl\",\n release=self.release,\n chromosome=self.chromosome,\n bin__in=bins,\n start__lte=self.end,\n end__gte=self.start,\n )\n )\n gene_ids = [itv.gene_id for itv in gene_intervals]\n symbols1 = {\n o.gene_symbol for o in EnsemblToGeneSymbol.objects.filter(ensembl_gene_id__in=gene_ids)\n }\n symbols2 = {o.symbol for o in Hgnc.objects.filter(ensembl_gene_id__in=gene_ids)}\n return sorted(symbols1 | symbols2)", "def hvgs_ids(self):\n if not hasattr(self, '_hvgs_ids'):\n mv = myvariant.MyVariantInfo()\n self._hvgs_ids = [i['_id'] for i in\n mv.query(self.snp_loc, fields='id')['hits']]\n return self._hvgs_ids", "def get_nps(self):\n\n # determine all leaf ids in the parse tree which refer to a noun\n nouns = []\n for node_id in self.parsetree.nodes():\n node = self.parsetree.node[node_id]\n if not node['is_leaf']:\n continue\n leaf_idx = node['left_leaf_idx']\n if leaf_idx >= len(self.tokens):\n continue\n self.words[leaf_idx] == node['label']\n is_noun = self.tokens[leaf_idx].POS.cdata[0] == 'N'\n if is_noun:\n nouns.append(node_id)\n\n NPs = set()\n for noun in nouns:\n 
NPs.add(self.get_np_for_idx(noun))\n return NPs", "def get_gene_sets(table, dominant):\n \n known = table[table[\"hgnc\"].isin(dominant)]\n gwide = set(known[\"hgnc\"][known[\"genomewide\"]])\n sugg = set(known[\"hgnc\"][known[\"suggestive\"]])\n \n gene_sets = {\"genomewide\": gwide, \"suggestive\": sugg}\n \n return gene_sets", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def nsrGenera(taxonList, synonymList):\r\n species = list(filter(None, sorted(taxonList + synonymList)))\r\n generaList = [i.split()[0] for i in species]\r\n generaList = list(dict.fromkeys(generaList))\r\n return generaList", "def get_RSOPuids_in_PFFGS(seg):\n \n rsopuids = [] \n \n sequences = seg.PerFrameFunctionalGroupsSequence\n \n for sequence in sequences:\n uid = sequence.DerivationImageSequence[0]\\\n .SourceImageSequence[0]\\\n .ReferencedSOPInstanceUID\n \n rsopuids.append(uid)\n \n return rsopuids", "def mel_gene_set(dict): # this uses the flanking genes, specifically\n\tmel_gene_set = set()\n\tfor k, v in dict.iteritems():\n\t\t#v[0] is up, v[1] is down\n\t\t#print \"this is v:\", v\n\t\tfor mg in v[0]:\n\t\t\tmel_gene_set.add(mg)\n\t\tfor mg in v[1]:\n\t\t\tmel_gene_set.add(mg)\n\treturn mel_gene_set", "def get_plasmid_gene_nodes(G):\n plasmid_gene_nodes = [nd for nd in G.nodes() if G.nodes[nd]['gene']==True]\n logger.info(\"Found %d nodes with plasmid genes\" % len(plasmid_gene_nodes))\n return plasmid_gene_nodes", "def all_possible_gene_transcription(dna: str):\n result = set()\n for dna in (dna, reverse_complement(dna)):\n rna = dna_to_rna(dna)\n start = find_motif(rna, START_CODON)\n for s in start:\n r = rna_to_protein(rna, start=s, end=True)\n if r:\n result.add(r)\n return result", "def geneIds(self):\n\t\treturn self._dataframe.index.tolist()", "def process_ss_dict(ss_dict, primary_p=10e-8, secondary_p=0.05, n_nom=1, \n secondary_or_nom=False):\n\n sig_crbs = {}\n\n for hpo, bedpath in ss_dict.items():\n\n ss = pd.read_csv(bedpath, sep='\\t')\n\n sig_primary = ss['meta_phred_p'] >= -np.log10(primary_p)\n sig_secondary = ss['meta_phred_p_secondary'] >= -np.log10(secondary_p)\n sig_nom = ss['n_nominal_cohorts'] >= n_nom\n\n if secondary_or_nom:\n sig = (sig_primary & (sig_secondary | sig_nom))\n else:\n sig = (sig_primary & sig_secondary & sig_nom)\n\n if sum(sig) > 0:\n for crb in ss['crb_id'][sig].tolist():\n if crb in sig_crbs.keys():\n sig_crbs[crb].append(hpo)\n else:\n sig_crbs[crb] = [hpo]\n\n return sig_crbs", "def get_pubkey_ids(self, addr):\n\n if len(self.pubkeys) > 0 and self.pubkeys[-1].has_key('fingerprint') \\\n and self.pubkeys[-1].has_key('uids'):\n\n pubkey_ids = []\n # compile pattern before use for better performance\n RCPT_RE = re.compile(addr)\n for k in self.pubkeys:\n for uid in k['uids']:\n match = RCPT_RE.search(uid)\n if match is not None:\n # check for key expiration\n if k['expires'] == '':\n pubkey_ids.append(k['fingerprint'][-16:])\n elif (time()+60) < float(k['expires']):\n pubkey_ids.append(k['fingerprint'][-16:])\n break\n return pubkey_ids", "def get_seqs_to_keep_lookup_from_fasta_file(fasta_f):\r\n return (\r\n set([seq_id.split()[0] for seq_id, seq in 
parse_fasta(fasta_f)])\r\n )", "def _findSamesetProteins(protToPeps, proteins=None):\n proteins = viewkeys(protToPeps) if proteins is None else proteins\n\n equalEvidence = ddict(set)\n for protein in proteins:\n peptides = protToPeps[protein]\n equalEvidence[tuple(sorted(peptides))].add(protein)\n equalProteins = list()\n for proteins in viewvalues(equalEvidence):\n if len(proteins) > 1:\n equalProteins.append(tuple(sorted(proteins)))\n return equalProteins", "def bond_stereo_keys(sgr):\n bnd_ste_keys = dict_.keys_by_value(bond_stereo_parities(sgr),\n lambda x: x in [True, False])\n return bnd_ste_keys", "def find_intersection(snp_name):\n intersect = set(snp_name[0])\n for i in range(1,len(snp_name)):\n intersect = intersect.intersection(set(snp_name[i]))\n return list(intersect)", "def get_gene_transcript_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col).reset_index()\n r = {}\n for gene_id, s in df.groupby('GeneId'):\n r[gene_id] = s.TranscriptId.tolist()\n return r", "def __SAGG_helper(self, agg):\r\n list_of_sagg_relation = list()\r\n if agg is None:\r\n logger.info(\"There are no SAGG relations\")\r\n else:\r\n for relation in agg:\r\n if relation.attrib.get(\"ci\") == relation.attrib.get(\"cj\"):\r\n sagg_tuple = tuple()+ (relation.attrib.get(\"ci\"), )\r\n list_of_sagg_relation.append(sagg_tuple)\r\n logger.debug(\"Found SAGG: (%s)\" % (sagg_tuple[0]))\r\n list_of_sagg_relation = list(dict.fromkeys(list_of_sagg_relation))\r\n return list_of_sagg_relation" ]
[ "0.6257982", "0.6162774", "0.59048253", "0.59010804", "0.5849605", "0.56463474", "0.54594696", "0.54492325", "0.5428095", "0.5416582", "0.5408381", "0.5408381", "0.53714514", "0.53380275", "0.5297365", "0.5293599", "0.527684", "0.5273915", "0.5239893", "0.5224164", "0.52092683", "0.5203469", "0.5181401", "0.5179398", "0.51548374", "0.5129792", "0.511828", "0.51033705", "0.509098", "0.5079638" ]
0.8354356
0
Gets the mapSquare object at x,y
    def get_square(self, x, y):
        if x < 0 or x > self.width-1 or y < 0 or y > self.height-1:
            return MapSquare(x, y, Tile.Wall, '~')  # return a wall if at end of map
        return self.mapArray[y][x]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSquare(x, y):\n\n\tglobal theMap, width, height\n\n\treturn theMap[x + y * width]", "def get_map_square(x, y):\n result = MAP_SQUARE_ROCK\n if ((x >=0 and x< width) and (y>= 0 and y< height)): # LT i think done TODO: Replace False with a condition that checks if the values x and y are valid. Valid index values start at 0. x must be less than width and y must be less than height. Negative numbers are not valid.\n row= dungeon_map[y]\n result= row[x] # LT... done? see bitmap hw in comments below TODO: Replace None with an expression that uses x and y to get the right value from dungeon_map. \n return result", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def get_square(self, row, col):\n\n return self.board[row][col]", "def get_object_at_location(self, x, y):\n object_map_at_target_location = self.maps.get((x, y))\n if not object_map_at_target_location:\n return None\n return object_map_at_target_location.get_real_object()", "def get_object_at_location(cls, x, y):\n object_map_at_target_location = cls.query\\\n .filter_by(x=x, y=y).one_or_none()\n if not object_map_at_target_location:\n return None\n return object_map_at_target_location.get_real_object()", "def get(self, x, y):\n if (x < 0 or x > self.width-1) or (y < 0 or y > self.height-1):\n return (mapfeatures.Void(), [])\n cell_entities = list(filter(lambda e: e.x == x and e.y == y, self._entities))\n return (self._mapfeatures[y][x], cell_entities)", "def get(self, x, y):\n i = self.map[y][x]\n return self.get(i)", "def cell_at(self, x, y):\n\n return self.maze_map[x][y]", "def get_square(self, index: int):\n return self.squares[index]", "def square(self):\n return self.x * self.x + self.y * self.y", "def inside_square(self, x, y):\n square_centers = self.get_square_centers()\n for i, row in enumerate(square_centers):\n for j, (square_x, square_y) in enumerate(row):\n\n if (square_x - self.square_width_half < x < square_x + self.square_width_half and\n square_y - self.square_width_half < y < square_y + self.square_width_half):\n\n return (i, j), (float(square_x), float(square_y))\n\n return None, None", "def square(self, row, col):\n return self.board[row][col]", "def square(self, row, col):\n return self._board[row][col]", "def coord(self, x, y):\n origin_x = self._raster_meta['transform'][3]\n origin_y = self._raster_meta['transform'][0]\n pixel_x = self._raster_meta['transform'][5]\n pixel_y = self._raster_meta['transform'][1]\n\n x = int((x - origin_x) / pixel_x)\n y = int((y - origin_y) / pixel_y)\n return self[x, y]", "def square_tofrom_square(newsystem, coord):\n return newsystem.coord(x=coord.x, y=coord.y)", "def Pixel2World(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xdist = geoMatrix[1]\r\n ydist = geoMatrix[5]\r\n coorX = (ulX + (x * xdist))\r\n coorY = (ulY + (y * ydist))\r\n return (coorX, coorY)", "def get_square(self, index):\n square = []\n for cell in self.squares[index]:\n square.append(self.content[cell[0]][cell[1]])\n return square", "def get_tile(self, x, y):\n\n try:\n row = int(x/self.box_width)\n col = int(y/self.box_height)\n return self.tiles[col][row]\n except IndexError:\n return Tile(None, 0,0,0,0, h=0,s=0,v=0)\n except ValueError as e:\n print 'ValueError encountered.'\n print 'x: ', x\n print 'box_width: ', self.box_width\n raise e", "def get(self):\n return (self.x,self.y);", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def cell_from_xy(self,x,y):\n 
return self.cell_array.item((x,y))", "def square(self, row, col):\n if 0 == row:\n if 0 == col:\n return self.tl\n elif 1 == col:\n return self.tc\n elif 2 == col:\n return self.tr\n elif 1 == row:\n if 0 == col:\n return self.ml\n elif 1 == col:\n return self.mc\n elif 2 == col:\n return self.mr\n elif 2 == row:\n if 0 == col:\n return self.bl\n elif 1 == col:\n return self.bc\n elif 2 == col:\n return self.br\n raise TypeError(\n \"No such (row, column) pair: each must be in range 0-2 inclusive\")", "def get_our_tile(self, x, y):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\treturn self.our_tiles[x][y]\n\t\treturn None", "def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)", "def room_xy(room, x, y, value=None):\n return room[x][y]", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def position(square):\n first = square[0]\n second = square[1]\n col = parseCol(first)\n row = parseRow(second)\n return (row, col)", "def room_at(self, x, y):\r\n return self.__maze[x][y]" ]
[ "0.8520868", "0.7586103", "0.685801", "0.68072706", "0.66959095", "0.6570747", "0.65456736", "0.6534609", "0.65282583", "0.65212566", "0.6491779", "0.64886266", "0.64586496", "0.6385448", "0.6362551", "0.63256884", "0.63170105", "0.6296599", "0.6267629", "0.6230384", "0.62274116", "0.62274116", "0.6225007", "0.61443084", "0.6142506", "0.613254", "0.6130867", "0.6116066", "0.60970193", "0.60898656" ]
0.81357706
1
Gets the display object at a square, for use by Viewport class
    def get_display_object(self, x, y):
        if x < 0 or x >= self.width:
            return DisplayObject.StaticObject(chr(0b11110111))
        if y < 0 or y >= self.height:
            return DisplayObject.StaticObject(chr(0b11110111))
        return self.mapArray[y][x].get_display_object()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_display_object(self):\n if len(self.objects) == 0:\n return self.tile.get_display_object()\n else:\n return self.objects[-1].get_display_object()", "def get_square(self, x, y):\n if x < 0 or x > self.width-1 or y < 0 or y > self.height-1:\n return MapSquare(x, y, Tile.Wall, '~') # return a wall if at end of map\n return self.mapArray[y][x]", "def getSquare(x, y):\n\n\tglobal theMap, width, height\n\n\treturn theMap[x + y * width]", "def get_square(self, row, col):\n\n return self.board[row][col]", "def get_board(self):\n return self.squares", "def get_square(self, index: int):\n return self.squares[index]", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def square(self, row, col):\n return self.board[row][col]", "def square(self, row, col):\n return self._board[row][col]", "def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)", "def __init__(self):\n self.size = width, height = pygame.display.Info().current_w, pygame.display.Info().current_h\n self.screen = pygame.display.set_mode(self.size)\n self.x = int((width - 910) / 2)\n self.y = int((height - 675) / 2)", "def get_square(self, index):\n square = []\n for cell in self.squares[index]:\n square.append(self.content[cell[0]][cell[1]])\n return square", "def display_board(self, screen):\n for wall in self.cube_walls_list:\n screen = wall.draw_rhombus(screen)\n for tile in self.tile_rhombus_list:\n screen = tile.draw_rhombus(screen)\n\n return screen", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def getSnappedWidget(self):\n if self.row>=0 and self.col>=0:\n return self.sheet.getCell(self.row, self.col)\n else:\n return None", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def get_tile(self, row, col):\n return self.grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def get_game_piece_object_at_position(self, position):\n\n column, row = self.transpose_position(position)\n\n return self.get_board()[int(row)][int(column)]", "def index_to_square(self, this_index: int) -> Square:\n return self.squares[this_index]", "def get_tile(self, row, col):\r\n return self._grid[row][col]", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def world_to_screen(self, x, y):\n return x-self.x, self.h-(y-self.y)", "def get_pos(self, off_w=0, off_l=0, off_h=0):\n try:\n return self.world_grid[self.w + off_w][self.l + off_l][self.h + off_h]\n except IndexError:\n return blocks['wall']", "def get_screen(env):\n # Returned screen requested by gym is 400x600x3\n # Transpose it into torch order (CHW).\n screen = env.render(mode='rgb_array').transpose((2, 0, 1))\n # Convert to float, rescale, convert to torch tensor\n screen = np.ascontiguousarray(screen, dtype=np.float32) / 255\n screen = torch.from_numpy(screen)\n # Resize, and add a batch dimension (BCHW)\n return resize(screen).unsqueeze(0)", "def square(self):\n return self.x * self.x + self.y * self.y", "def get_tile(self, row, col):\n return 
self._grid[row][col]" ]
[ "0.6762274", "0.6488981", "0.6479352", "0.638152", "0.62307405", "0.6205239", "0.61711794", "0.61711794", "0.611314", "0.6035994", "0.5998655", "0.59058344", "0.58368677", "0.58368266", "0.58320564", "0.58320564", "0.58320564", "0.58304864", "0.5829658", "0.5827257", "0.5822391", "0.5819918", "0.58027697", "0.58017683", "0.5788448", "0.5787912", "0.5733388", "0.57249266", "0.57174563", "0.5717167" ]
0.7273723
0
Gets the appropriate object for rendering on a Display. Will be the tile if there are no objects in the square. Otherwise, will be the topmost object.
    def get_display_object(self):
        if len(self.objects) == 0:
            return self.tile.get_display_object()
        else:
            return self.objects[-1].get_display_object()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_display_object(self, x, y):\n if x < 0 or x >= self.width:\n return DisplayObject.StaticObject(chr(0b11110111))\n if y < 0 or y >= self.height:\n return DisplayObject.StaticObject(chr(0b11110111))\n return self.mapArray[y][x].get_display_object()", "def getone(self, Cl):\n for object in self.ginfo.sprites():\n if isinstance(object, Cl):\n return object\n else:\n return None", "def get_tile(self):\n return Tile.get_tile(self.get_number())", "def getTile(self):\n return self.tile", "def get(self, display):\n if isinstance(display, Split):\n return display\n try:\n return [p for p in self.panes if p.display == display][0]\n except IndexError:\n return None", "def get_tile(self, x, y):\n\n try:\n row = int(x/self.box_width)\n col = int(y/self.box_height)\n return self.tiles[col][row]\n except IndexError:\n return Tile(None, 0,0,0,0, h=0,s=0,v=0)\n except ValueError as e:\n print 'ValueError encountered.'\n print 'x: ', x\n print 'box_width: ', self.box_width\n raise e", "def getObject(self, row, column, gameGrid=None):\n if not gameGrid:\n gameGrid = self.gameGrid\n return gameGrid.getItem(row, column)", "def get_tile(self, row, col):\n return self.grid[row][col]", "def get_object_at_location(self, x, y):\n object_map_at_target_location = self.maps.get((x, y))\n if not object_map_at_target_location:\n return None\n return object_map_at_target_location.get_real_object()", "def get_our_tile(self, x, y):\n\t\tif x >= 0 and x < self.w and y >= 0 and y < self.h:\n\t\t\treturn self.our_tiles[x][y]\n\t\treturn None", "def get_tile(self, row, col):\r\n return self._grid[row][col]", "def get_tile(self, row, col):\n tile_index = (row - 1) * self.num_col_tiles + (col - 1)\n tile = self.tiles[tile_index]\n return tile", "def get_tile(self, row, col):\n return self._grid[row][col]", "def get_object_at_location(cls, x, y):\n object_map_at_target_location = cls.query\\\n .filter_by(x=x, y=y).one_or_none()\n if not object_map_at_target_location:\n return None\n return object_map_at_target_location.get_real_object()", "def get_game_piece_object_at_position(self, position):\n\n column, row = self.transpose_position(position)\n\n return self.get_board()[int(row)][int(column)]", "def get_object(self):\n if not self.user.is_authenticated():\n raise Http404('Access denied')\n self.url_name = self.request.resolver_match.url_name\n if self.url_name == 'sticker-detail':\n return Sticker.objects.get(\n board__desk__owner__user=self.user,\n board__prefix=self.kwargs['prefix'],\n sequence=self.kwargs['sequence']\n )\n elif self.url_name == 'board-comments':\n return Board.objects.get(\n desk__owner__user=self.user,\n sequence=self.kwargs['board_sequence']\n )\n elif self.url_name == 'sprint-comments':\n return Sprint.objects.get(\n number=self.kwargs['sprint_number'],\n board__desk__owner__user=self.user,\n board__sequence=self.kwargs['board_sequence']\n )", "def top_visible_entity(self):\n if self.size == 0:\n return None\n\n i = self.size - 1\n while i >= 0:\n e = self[i]\n if e.visible:\n return e\n i = i - 1\n return None", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, x, y):\n if x < 0 or x >= Settings.SIZE_X or y < 0 or y >= Settings.SIZE_Y:\n return MarkerType.NONE\n return self.__grid[y][x]", "def get_tile(self, row, col):\r\n\r\n return self._board[row][col]", 
"def get_tile(cls, tile_id):\n\n return Tile.tile_listing.get(tile_id, None)", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def get_tile(self, row, col):\r\n \r\n return self._cells[row][col]", "def getDisplay(self):\n for display in self.listDisplays():\n if self.testDisplay(display):\n if self.verbose:\n print \"Got an existing working display on %s\" % display\n return display\n if self.verbose:\n print \"Not found any existing working display\"\n\n return self.startServer()", "def get_surface(self, name):\n for surface in self._surfaces:\n if surface.name == name:\n return surface\n return None", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._grid_tile[row][col]", "def find_closest(self, cls):\n closest = None\n shortest_dist = None\n for sprite in self.game.entities[ALL_SPRITES]:\n if isinstance(sprite, cls):\n curr_dist = distance((self.x, self.y), (sprite.x, sprite.y))\n if shortest_dist is None or curr_dist < shortest_dist:\n closest = sprite\n shortest_dist = curr_dist\n return closest" ]
[ "0.7496521", "0.6938144", "0.6659613", "0.6383434", "0.63334805", "0.6095585", "0.60727197", "0.60512614", "0.6021134", "0.6013403", "0.59988815", "0.596359", "0.5947265", "0.59074265", "0.5899318", "0.58803385", "0.5866117", "0.58558357", "0.58558357", "0.58558357", "0.5854277", "0.58518416", "0.58438075", "0.5819456", "0.58168036", "0.57780045", "0.57659596", "0.5758018", "0.57544553", "0.574884" ]
0.8527499
0
Transform string of form '2020W10' into tuple (2020, 10)
    def get_yearweek(yearweekstr: str) -> tuple:
        return tuple(map(int, yearweekstr.split('-W')))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_date_string(date_string: str):\n try:\n units,_,epoch = date_string.split(None, 2)\n except ValueError:\n raise ValueError(f'Invalid format: {date_string}')\n else:\n return (units.lower(), parse(epoch))", "def split_date_string(date_string: str):\n try:\n units,_,epoch = date_string.split(None, 2)\n except ValueError:\n raise ValueError(f'Invalid format: {date_string}')\n else:\n return (units.lower(), parse(epoch))", "def split_fr_date(date_str):\n day=date_str[:2]\n month=date_str[3:5]\n try:\n #date coded on 4 digits\n year=date_str[6:]\n except:\n #date coded on 2 digits\n year=date_str[4:]\n\n return year, month, day", "def stringTimeToTuple_NEW(st):\n st, ms = split(st, '.')\n y, m, d, h, n, s = split(st, '_')\n return y,m,d,h,n,s,ms", "def version_str2tuple(version_str):\n if not isinstance(version_str, str):\n TypeError('version_str must be a string.')\n version_info_list = re.findall(r'[0-9a-zA-Z]+', version_str)\n\n def convert_to_int(string):\n value = None\n if re.match(r'^\\d+$', string):\n value = int(string)\n else:\n value = string\n return value\n\n version_info_list = (convert_to_int(s) for s in version_info_list)\n\n return tuple(version_info_list)", "def extract(d):\n \n Y, M, D, W, H = (None for _ in range(5))\n \n def get_hour(groups):\n H, m, s = (int(x) for x in groups[4:7])\n if groups[8] == 'am' and H == 12:\n H = 0\n if groups[8] == 'pm' and 0 < H < 12:\n H += 12\n return H + m/60 + s/3600\n \n if type(d) == str:\n d = d.lower()\n match = re.match(r'^(\\d+)/(\\d+)/(20\\d+)( (\\d+):(\\d+):(\\d+)( (am|pm))?)?', d)\n if match is None:\n match = re.match(r'^(\\d+)-([a-z]+)-(\\d+)( (\\d+):(\\d+):(\\d+)( (am|pm))?)?', d)\n if match is None:\n return\n else:\n month = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']\n D = int(match.group(1))\n M = month.index(match.group(2)) + 1\n Y = 2000 + int(match.group(3))\n W = datetime.date(Y, M, D).timetuple()[6]\n if match.group(4) is None:\n H = -1\n else:\n H = get_hour(match.groups())\n else:\n M, D, Y = (int(x) for x in (match.groups())[:3])\n W = datetime.date(Y, M, D).timetuple()[6]\n if match.group(4) is None:\n H = -1\n else:\n H = get_hour(match.groups())\n return (Y, M, D, W, H)", "def get_year(string): \n return int(string[11:15])", "def str2tuple(str):\n pack = (int(str[1]), int(str[4]))\n return pack", "def split_date(value):\n if not is_valid_date(value):\n return ('', '', '')\n\n splited = value.split('-')\n\n try:\n year = splited[0]\n except IndexError:\n year = ''\n\n try:\n month = splited[1]\n except IndexError:\n month = ''\n\n try:\n day = splited[2]\n except IndexError:\n day = ''\n\n return (year, month, day)", "def word_to_tuple(word):\n # since strings are sequences of letters\n # `sorted` will automatically convert a string\n # to a list, then sort it\n word = tuple(sorted(word))\n return word", "def year_expand(s):\n regex = r\"^((?:19|20)\\d{2})?(\\s*-\\s*)?((?:19|20)\\d{2})?$\"\n try:\n start, dash, end = re.match(regex, ustr(s)).groups()\n start = start or 1900\n end = end or 2099\n except AttributeError:\n return 1900, 2099\n return (int(start), int(end)) if dash else (int(start), int(start))", "def iso_timestamp_split(iso_time: ISOTimestamp) -> Tuple[str, str, str]:\n m = re.match(r'^(.{19})((?:\\.\\d+)?)(.*)$', iso_time)\n\n return m.group(1), m.group(2), m.group(3)", "def parse_year(txt):\n\n txt = txt.strip()\n if \"-\" in txt:\n res = re.sub('[^0-9]', '', txt)\n return [res[0:4], res[4:8]]\n else:\n return [txt, txt]", "def 
_read_sansculottide_date(match):\n day_string = match.group(1)\n d = None\n\n for n, candidate in enumerate(names.sans_culottides):\n if candidate.sanitized == day_string:\n d = n\n break\n else:\n return\n\n y = roman_to_decimal(match.group(2))\n\n return (y, 13, d)", "def year_parse(s):\n regex = r\"((?:19|20)\\d{2})(?:$|[-/]\\d{2}[-/]\\d{2})\"\n try:\n year = int(re.findall(regex, ustr(s))[0])\n except IndexError:\n year = None\n return year", "def dateParser(str):\n\tyear = ''\n\tfor c in str:\n\t\tif c.isspace():\n\t\t\tyear = ''\n\t\telif c.isdigit():\n\t\t\tyear = year + c\n\t\t\tif len(year) == 4:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tyear = ''\n\tif len(year) < 4:\n\t\treturn None\n\treturn int(year)", "def get_week_date(self, raw_week: str) -> tuple:\n\n search_result = re.search(r'^(\\d+.\\d+)\\s+-\\s+\\d+.\\d+', raw_week)\n\n if \"from\" in raw_week:\n week = re.sub(r'^\\D+', '', raw_week)\n\n elif search_result:\n week = search_result.group(1)\n else:\n week = \"{}.{}\".format(current_day, current_month)\n\n week_in_date_format_1900 = datetime.datetime.strptime(week, \"%d.%m\")\n currect_week = week_in_date_format_1900.replace(current_year)\n\n return currect_week.isoformat(), currect_week.isocalendar()[1]", "def string_to_years(s):\n pattern = r'\\d\\d\\d\\d'\n r = re.compile(pattern)\n min_year = 1960\n max_year = datetime.now().year + 1\n return list(filter(lambda y: y >= min_year and y <= max_year, map(int, r.findall(s))))", "def year_tracker(words):\n new_words = []\n for w in words:\n new_word = re.sub(r\"^[1][789][0-9]{2}$\", \"jahreszahl\", w) # for 1700-1999\n new_word = re.sub(r\"^[2][01][0-9]{2}$\", \"jahreszahl\", new_word) # for 2000-2199\n new_words += [new_word]\n return new_words", "def extract_season_episode_from_str(s):\n m = check_for_season_episode_code(s)\n\n if not m:\n return 1, 1\n\n return int(m.group(1)), int(m.group(2))", "def get_date(date_str):\n date_list = date_str.split(\"-\")\n start_date, end_date = map(lambda x: x.strip(), date_list)\n return (start_date, end_date)", "def get_year_month_day_from_date(date: str) -> tuple:\n match = re.findall(\"\\d{2,4}\", date[:len(DATE_FORMAT)])\n if len(match) != 3:\n raise ValueError(\"Invalid date input given at regex match. Got (%s)\" % date)\n year, month, day = match\n if len(year) != 4 or len(month) != 2 or len(day) != 2:\n raise ValueError(\"Invalid date input given. 
Got (%s)\" % date)\n return year, month, day", "def findYear(str):\n return int(re.search(\"(?<=yr=)\\d*\", str)[0])", "def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r", "def parse(s):\n m, d, y = s.split('/')\n mo = int(m)\n da = int(d)\n yr = int(y)\n d = datetime.date(yr, mo, da)\n return d", "def get_week_from_datestr(datestr: str) -> int:\n return date.fromisoformat(datestr).isocalendar()[1]", "def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])", "def parse(self, str):\n values = self._exp.findall(str)\n if values is None or len(values) == 0:\n return None\n\n values = values[0]\n assert(len(values) == 3)\n\n day = int(values[self._dmy_idx[0]])\n month = int(values[self._dmy_idx[1]])\n year = int(values[self._dmy_idx[2]])\n\n return date(year, month, day)", "def extract_year(text):\n # type: (str) -> int\n data = re.search(r\"\\d{4}\", text)\n return int(data.group()) if data else 0", "def parse_string_datetime(date):\n date_string_parse = date.split('/')\n year = int(date_string_parse[0])\n month = int(date_string_parse[1])\n day = int(date_string_parse[2])\n return year, month, day" ]
[ "0.5761229", "0.5761229", "0.56280375", "0.5603652", "0.55228376", "0.5497433", "0.5434086", "0.54046214", "0.5305967", "0.5301249", "0.5266323", "0.52434844", "0.5174696", "0.5173403", "0.5165354", "0.51584536", "0.5139238", "0.51305723", "0.5117815", "0.5103784", "0.50881475", "0.50569", "0.5044383", "0.5036961", "0.49877992", "0.49693874", "0.49590558", "0.4955484", "0.4945731", "0.4918217" ]
0.76194024
0
Get the week number from ISO formatted string
    def get_week_from_datestr(datestr: str) -> int:
        return date.fromisoformat(datestr).isocalendar()[1]
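A quick sanity check of the two week helpers above, using only the standard library (the expected values follow from the ISO-8601 calendar; note that `get_yearweek` splits on `'-W'`, so the dash is required even though the query is phrased as `'2020W10'`):

    from datetime import date

    # get_yearweek('2020-W10') -> (2020, 10)
    assert tuple(map(int, '2020-W10'.split('-W'))) == (2020, 10)

    # get_week_from_datestr('2020-03-04') -> 10, since 2020-03-04 falls in ISO week 10
    assert date.fromisoformat('2020-03-04').isocalendar()[1] == 10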
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findWeekend(str):\n return int(re.search(\"(?<=wknd=)\\d*\", str)[0])", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def get_weekday_number(date):\n return date.strftime('%w')", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def current_week_number(date=datetime.datetime.now()):\n return int(date.strftime(\"%W\"))", "def get_yearweek(yearweekstr: str) -> tuple:\n return tuple(map(int, yearweekstr.split('-W')))", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def WeekdayNum(name):\n return _WEEKDAY_DICT.get(name.capitalize(), 0)", "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum", "def week_index(self) -> pulumi.Input[Union[str, 'Type']]:\n return pulumi.get(self, \"week_index\")", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def week_fromordinal(cls, ordinal):\n return int(math.floor(cls.day_fromordinal(ordinal) / 7)) + 1", "def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]", "def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1", "def getCurrentWeek(self):\n return self.wcount % 48", "def day_of_week(self) -> str:\n return self.elements[4]", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def GetWeekString(self, basic=False, truncation=NoTruncation):\n century, decade, year, week, day = self.GetWeekDay()\n if day is None:\n if week is None:\n # same as the calendar string\n return self.GetCalendarString(basic, truncation)\n else:\n if truncation == NoTruncation:\n if basic:\n 
return \"%02i%i%iW%02i\" % (century, decade, year, week)\n else:\n return \"%02i%i%i-W%02i\" % (century, decade, year, week)\n elif truncation == Truncation.Century:\n if basic:\n return \"%i%iW%02i\" % (decade, year, week)\n else:\n return \"%i%i-W%02i\" % (decade, year, week)\n elif truncation == Truncation.Decade:\n if basic:\n return \"-%iW%02i\" % (year, week)\n else:\n return \"-%i-W%02i\" % (year, week)\n elif truncation == Truncation.Year:\n return \"-W%02i\" % week\n else:\n raise ValueError\n else:\n if truncation == NoTruncation:\n if basic:\n return \"%02i%i%iW%02i%i\" % (\n century, decade, year, week, day)\n else:\n return \"%02i%i%i-W%02i-%i\" % (century,\n decade,\n year,\n week,\n day)\n elif truncation == Truncation.Century:\n if basic:\n return \"%i%iW%02i%i\" % (decade, year, week, day)\n else:\n return \"%i%i-W%02i-%i\" % (decade, year, week, day)\n elif truncation == Truncation.Decade:\n if basic:\n return \"-%iW%02i%i\" % (year, week, day)\n else:\n return \"-%i-W%02i-%i\" % (year, week, day)\n elif truncation == Truncation.Year:\n if basic:\n return \"-W%02i%i\" % (week, day)\n else:\n return \"-W%02i-%i\" % (week, day)\n elif truncation == Truncation.Week:\n return \"-W-%i\" % day\n else:\n raise ValueError", "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def get_day_of_week_string(date_string):\n\n # Split on / string, and feed to a datetime object, to use weekday function\n date_strings = date_string.split(\"/\")\n update_date = datetime.datetime(int(date_strings[2]), int(date_strings[1]), int(date_strings[0]))\n weekDays = (\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\")\n day_of_week = str(weekDays[update_date.weekday()])\n return day_of_week", "def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)", "def str_day(s):\n # TODO: Fix the -06:00 time zone offset\n if s:\n d = convert_from_iso(s)\n return datetime.datetime.strftime(d, \"%d\").strip(\" \")\n else:\n # Couldn't parse, return original.\n return s", "def CONST_WEEK_TIMESTAMP() -> int:\n return 604800", "def get_week_from_date(date) -> int:\n month, year = date.month, date.year\n if month < 4:\n year -= 1\n ld = _labor_day(year)\n wk1_wed = ld + timedelta(days=2)\n days_since = (date - wk1_wed).days\n weeks_since = days_since / 7.\n week = math.floor(weeks_since) + 1\n return int(week)", "def current_week() -> int:\n now = datetime.now()\n return get_week_from_date(now)", "def WeekdayName(num, length=99):\n if num < 1 or num > NUM_WEEKDAYS:\n raise ValueError('Bad weekday number')\n return _WEEKDAY_NAMES[num][:length]" ]
[ "0.7321482", "0.7087024", "0.69715667", "0.68040544", "0.6613867", "0.6562995", "0.6536886", "0.6459331", "0.63348484", "0.61817926", "0.61559933", "0.61237025", "0.61049557", "0.6078325", "0.60460734", "0.6043959", "0.6013798", "0.60136014", "0.5982375", "0.5924236", "0.589889", "0.5894432", "0.5890996", "0.5869367", "0.5812805", "0.5766074", "0.57545704", "0.5732185", "0.5723579", "0.57029533" ]
0.77668357
0
Function that normalises the admission to be per 100k of the population of that country.
    def normalize_admission_val(get_population: Callable, row: pd.Series) -> float:
        val = row['value']
        # Note that in the provided admission dataframe, only the daily data is given in absolute value
        if row['indicator'] in (ADMISSION_INDICATORS['weekly_norm'], ADMISSION_INDICATORS['weekly_icu']):
            return val
        return val * 100000 / get_population(row['country'])
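A small worked example of the normalisation above: for a daily indicator, a raw value of 250 admissions in a country of 10,000,000 people becomes 250 * 100000 / 10,000,000 = 2.5 admissions per 100k inhabitants, while the weekly indicators are treated as already expressed per 100k and are passed through unchanged.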
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def improve_population(self):\r\n for index in range(len(self.district_population)):\r\n district = self.district_population[index]\r\n districtsolution = hillclimber.HillClimber(district, self.cable_cost, self.battery_cost)\r\n self.district_population[index] = districtsolution.run(1000, 80000)\r\n self.cost_populations[index] = district.total_cost(self.battery_cost, self.cable_cost)", "def process_admission(admission_df: pd.DataFrame):\n get_population = Populations() # Populations() is set up as a closure, enabling O(1) lookup\n\n admission_df['year_week'] = admission_df['year_week'].apply(get_yearweek) # change week string into tuple of ints\n admission_df['value'] = admission_df.apply(partial(normalize_admission_val, get_population), axis=1) # notmalise daily data by the population\n\n norm_admission = admission_df[(admission_df['indicator'] == ADMISSION_INDICATORS['daily_norm']) | (admission_df['indicator'] == ADMISSION_INDICATORS['weekly_norm'])].groupby(['country', 'year_week'], as_index=False).sum().rename(columns={'value': 'norm'})\n icu_admission = admission_df[(admission_df['indicator'] == ADMISSION_INDICATORS['daily_icu']) | (admission_df['indicator'] == ADMISSION_INDICATORS['weekly_icu'])].groupby(['country', 'year_week'], as_index=False).sum().rename(columns={'value': 'icu'})\n\n return pd.merge(norm_admission, icu_admission, how=\"outer\", on=['country', 'year_week'])", "def gricells_to_adm0(myC = 'CO', _haz='PF', src = 'pop_affected.csv'):\n global _df\n # Find the pop/space affected\n var = src[:src.index('_')]\n # Get the return period x basin data\n _df = pd.read_csv(src).drop(var, axis = 1)\n # Assign names to the indices\n _df.index.name = 'gridcell'\n _df.columns.name = 'rp'\n # assign dtypes\n _df.columns = _df.columns.astype(int)\n # get a basin,rp index\n _df = _df.stack().to_frame()\n global rps,inv_rps\n # Get a list of RPS\n rps = list(_df.index.levels[1].astype(int))\n # If the first rp isn't 1, then add it to the beginning and assume that there isn't any damage\n if rps[0] != 1.: rps = np.append([1.],[rps])\n # Calculate inverse RPS\n inv_rps = [1/i for i in rps]\n # Calculate final rps... 
any reason why this is missing 5?\n final_rps = [1, 20, 50, 100,250, 500, 1000,1500,2000]\n # Get an empty dataframe with country, final rps as the x axis\n final_exceedance = pd.DataFrame(index= pd.MultiIndex.from_product([[myC],final_rps]))\n # Set loss to None\n final_exceedance['loss'] = None\n # create dataframe to store random numbers\n loss = pd.DataFrame(index=_df.sum(level='gridcell').index).reset_index()\n loss['myC'] = myC\n loss.set_index(['myC','gridcell'], inplace = True)\n lossc = loss.sum(level = 'myC')\n loss = loss.reset_index().set_index('myC')\n\n # generate random numbers\n NYECOS = int(1E4) # <-- any multiple of 10K\n for _yn in range(NYECOS):\n loss['_'] = [np.random.uniform(0,1) for i in range(loss.shape[0])]\n loss['y'+str(_yn)] = loss.apply(lambda x:random_to_loss(x.gridcell,x['_']),axis=1)\n\n if _yn != 0 and (_yn+1)%500 == 0:\n\n lossc = pd.concat([lossc,loss.drop('_',axis=1).sum(level='myC')],axis=1)\n loss = loss[['gridcell']]\n print(_yn+1)\n\n for _reg in loss.index.values:\n aReg = lossc.loc[_reg].sort_values(ascending=False).reset_index()\n\n for _frp in final_rps:\n final_exceedance.loc[(_reg,_frp),'loss'] = float(aReg.iloc[int((NYECOS-1)/_frp)][_reg])\n\n total_pop = pd.read_csv('{}_affected.csv'.format(var))[var].sum()\n (final_exceedance/total_pop).to_csv('../inputs/'+myC+'regional_exceedance_'+_haz+src[:2]+'.csv')", "def adjusted_pa(personal_allowance, salary):\n\t\tlo, hi = 100000, 120000\n\t\tif salary <= lo:\n\t\t\treturn personal_allowance\n\t\telif salary >= hi:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn (salary - 100000) / 2", "def normalise_to(self, population: float):\n\n new_geno = GenoDistrib(self._default_probs, False)\n if population == 0.0: # small optimisation shortcut\n return new_geno\n truncated_store = self._normalise_to(population)\n new_geno._store = truncated_store\n new_geno._gross = population\n return new_geno", "def standardization(a, p):\r\n return a * 10 / 100 * p * p", "def per_capi(country):\r\n df = ouvrir_fichier()\r\n df = df.loc[df['country'].isin([country])]\r\n df = df[(df[\r\n 'emissions'] == 'Emissions per capita (metric tons of carbon dioxide)'\r\n )]\r\n resultat = {}\r\n longeur = len(df)\r\n for i in range(longeur):\r\n resultat[int(df.iloc[i][2])] = float(df.iloc[i][4])\r\n\r\n return resultat", "def normalise(self):\n fitness_sum = np.sum(self.fitness)\n for i in range(self.loops):\n self.normalised_fitness[i] = self.fitness[i] / fitness_sum", "def standardizedIncome(dfIn, dfOut):\n nIncome = pd.Series(scale(np.log(dfIn['AMT_INCOME_TOTAL'])), name = 'scaledLogINC')\n dfOut = pd.concat([dfOut, nIncome], axis = 1)\n return dfOut", "def clean_data():\n datapath = Path(os.getcwd()) / \"data\"\n files = [str(file) for file in datapath.glob(\"*.csv\")]\n for file in files:\n if file.endswith(\"confirmed.csv\"):\n Confirmed = pd.read_csv(file)\n elif file.endswith(\"deaths.csv\"):\n Deaths = pd.read_csv(file)\n elif file.endswith(\"recovered.csv\"):\n Recovered = pd.read_csv(file)\n\n dataFrames = [Confirmed, Deaths, Recovered]\n countryList = list(dataFrames[0][\"Country/Region\"]) #list of valid countries\n countryList = list(dict.fromkeys(countryList))\n\n #create country population dictionary and align values with those in countryList\n countriesPop = {}\n countriesPop[\"US\"] = CountryInfo(\"USA\").population()\n countriesPop[\"Czechia\"] = CountryInfo(\"Czech Republic\").population()\n countriesPop[\"Taiwan*\"] = CountryInfo(\"Taiwan\").population()\n countriesPop[\"Korea, South\"] = CountryInfo(\"South 
Korea\").population()\n countriesPop[\"Eswatini\"] = CountryInfo(\"Swaziland\").population()\n countriesPop[\"Cote d'Ivoire\"] = CountryInfo(\"Ivory Coast\").population()\n\n for country in countryList:\n try:\n countriesPop[country] = CountryInfo(country).population()\n except KeyError:\n pass\n\n #remove unnecessary information from dataframes\n for count in range(len(dataFrames)):\n dataFrames[count] = dataFrames[count].drop(\"Province/State\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Lat\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Long\",axis=1)\n dataFrames[count] = dataFrames[count].rename(columns={\"Country/Region\": \"Country\"})\n dataFrames[count][\"Country\"] = dataFrames[count][\"Country\"].replace({\"Korea, South\": \"South Korea\"})\n dataFrames[count] = dataFrames[count].groupby(\"Country\").sum()\n\n # create per 100k capita values by dividing country data by population\n ConfirmedPC = dataFrames[0].copy()\n DeathsPC = dataFrames[1].copy()\n RecoveredPC = dataFrames[2].copy()\n countryList.append(\"South Korea\")\n\n for country in countryList:\n try:\n ConfirmedPC.loc[country] = ConfirmedPC.loc[country].divide(countriesPop[country]).multiply(100000) #confirmed cases per 100k inhabitants\n DeathsPC.loc[country] = DeathsPC.loc[country].divide(countriesPop[country]).multiply(100000) #deaths per 100k inhabitants\n RecoveredPC.loc[country] = RecoveredPC.loc[country].divide(countriesPop[country]).multiply(100000) #recovered cases per 100k inhabitants\n except KeyError:\n pass\n\n dataFrames.extend([ConfirmedPC, DeathsPC, RecoveredPC])\n\n return dataFrames, countryList", "def grade(population, target_sum, target_mult):\r\n summed = reduce(add, (fitness(x, target_sum, target_mult) for x in population), 0)\r\n return summed / len(population)", "def mortalidade(self):\n self.covidbr['mortalidade'] = self.covidbr['obitosAcumulado'] / \\\n (self.covidbr['populacaoTCU2019'] / (10**5))", "def calculate_continent_statistics(countries_df, group_col):\n continents_df = countries_df.drop(drop_cols, axis=1).groupby([group_col, 'WHO Region']).agg('mean').reset_index()\n continents_df['Country/Region'] = continents_df['WHO Region']\n continents_df['Population'] = population_data['Population'].sum()\n\n return continents_df", "def grade(population, targetSum, targetProduct):\n summed = reduce (add,(fitness(x, targetSum, targetProduct) for x in population), 0 )\n return summed / len(population)", "def _normalize(self, inp):\n \n return inp/inp.sum()", "def average_population_grade(population):\r\n total = 0\r\n for individual in population :\r\n total += get_individual_fitness(individual)\r\n return total/POPULATION_COUNT", "def gini_normalized(a, p):\n return gini(a, p) / gini(a, a)", "def normalize(self, type_range = 0):\n \n for i, chromosome in enumerate(self.population):\n self.population[i] = chromosome / np.sum(np.abs(chromosome)) * self.individual_type.gene_count\n\n #for i, chromosome in enumerate(self.population):\n # if type_range == 0:\n # self.population[i] = 2 * (chromosome - np.min(chromosome))/np.ptp(chromosome) - 1\n # elif type_range == 1:\n # self.population[i] = (chromosome - np.min(chromosome))/np.ptp(chromosome)", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x", "def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = 
(x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x", "def _sum_up(param, country):\n if country not in COUNTRIES_TO_SUM:\n param = param[0]\n else:\n if not len(set(param)) > 1:\n param = param[0]\n else:\n if isinstance(param[0], float):\n param = sum(param)\n else:\n param = param[0]\n\n return param", "def normalize_probability(p_unnormalized):\n p_normalized=p_unnormalized/p_unnormalized.sum(axis=0)\n return p_normalized", "def combined_crude_rate(total_incidence, grouped_populations):\n grouped_population = list(map(sum, zip(*grouped_populations)))\n crude = list(map(lambda incidence: (incidence/grouped_population)*100000,\n total_incidence))\n combined_crude = pd.DataFrame((sum(crude)))\n return combined_crude", "def normalize_houndsfield(data_):\n cpy = data_ + 1024\n cpy /= 3000\n return cpy", "def relative_population(data, population, ageclass):\n total_pop = data[population].sum(axis=0)\n data['rel_pop'] = (data[population]/total_pop)*100000\n relative_pop = data.pivot(columns=ageclass, values='rel_pop').sum(axis=0)\n return relative_pop", "def modify_mig(country1,country2,countries,factor,mig,t=0):\n if (country1 in countries or country1 == 'all') and (country2 in countries or country2 == 'all') :\n if (type(factor) == int or type(factor) == float or factor.replace('.','',1).isdigit()) and float(factor) >=0.0:\n if (type(t) == int or t.isdigit()) and int(t) >= 0:\n t = int(t)\n factor = float(factor)\n if country1 == 'all':\n country1 = range(len(countries))\n else:\n country1 = [countries.index(country1)]\n if country2 == 'all':\n country2 = range(len(countries))\n else:\n country2 = [countries.index(country2)]\n for i in country1:\n for j in country2:\n mig[t:,i,j] = mig[t,i,j] * factor\n mig[t:,j,i] = mig[t,j,i] * factor\n return mig", "def per(a):\n return a * 100", "def normalise_single_iso(data, iso, lookup_table):\n normalisation_factor = lookup_table[1][iso]\n new_data = data / normalisation_factor\n return new_data", "def inflation (years, salaries):\n for i in range(len(salaries)):\n salaries[i][0] = float(salaries[i][0])/(0.368+0.025*(years[i][0]-2000))" ]
[ "0.5837532", "0.56250906", "0.55420405", "0.5459075", "0.53852814", "0.5379061", "0.5376545", "0.5342104", "0.5329432", "0.53068197", "0.5255592", "0.5225455", "0.5216194", "0.5173729", "0.5161948", "0.51232946", "0.5116454", "0.51086277", "0.5097741", "0.50803447", "0.50803447", "0.50708365", "0.50474375", "0.5039578", "0.5038169", "0.5032579", "0.50261873", "0.5010668", "0.5009721", "0.50075394" ]
0.66003275
0
Function that process the admission dataset, returning normalised weekly admission number for both normal and icu The weekly data is found by either 1. summing daily_norm, normalised by the population, of the same week 2. using the weekly norm
def process_admission(admission_df: pd.DataFrame): get_population = Populations() # Populations() is set up as a closure, enabling O(1) lookup admission_df['year_week'] = admission_df['year_week'].apply(get_yearweek) # change week string into tuple of ints admission_df['value'] = admission_df.apply(partial(normalize_admission_val, get_population), axis=1) # notmalise daily data by the population norm_admission = admission_df[(admission_df['indicator'] == ADMISSION_INDICATORS['daily_norm']) | (admission_df['indicator'] == ADMISSION_INDICATORS['weekly_norm'])].groupby(['country', 'year_week'], as_index=False).sum().rename(columns={'value': 'norm'}) icu_admission = admission_df[(admission_df['indicator'] == ADMISSION_INDICATORS['daily_icu']) | (admission_df['indicator'] == ADMISSION_INDICATORS['weekly_icu'])].groupby(['country', 'year_week'], as_index=False).sum().rename(columns={'value': 'icu'}) return pd.merge(norm_admission, icu_admission, how="outer", on=['country', 'year_week'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_admission_val(get_population: Callable, row: pd.Series) -> float:\n val = row['value']\n\n # Note that in the provided admission dataframe, only the daily data is given in absolute value\n if row['indicator'] in (ADMISSION_INDICATORS['weekly_norm'], ADMISSION_INDICATORS['weekly_icu']):\n return val\n\n return val * 100000 / get_population(row['country'])", "def getAbsNormalizationFactor(deltaE_wkspace,min,max):\n global reducer\n van_mass=reducer.get_default_parameter('vanadium-mass') \n \n Integration(InputWorkspace=deltaE_wkspace,OutputWorkspace='van_int',RangeLower=min,RangeUpper=max,IncludePartialBins='1')\n input_ws = mtd[deltaE_wkspace]\n ei_monovan = input_ws.getRun().getLogData(\"Ei\").value\n data_ws=mtd['van_int']\n nhist = data_ws.getNumberHistograms()\n #print nhist\n\n signal1_sum = 0.0\n weight1_sum = 0.0 \n signal2_sum = 0.0\n weight2_sum = 0.0 \n signal3_sum = 0.0\n weight3_sum = 0.0 \n signal4_sum = 0.0\n weight4_sum = 0.0 \n\n \n ic=0;\n izerc=0;\n for i in range(nhist):\n try:\n det = data_ws.getDetector(i)\n except Exception:\n continue\n if det.isMasked():\n continue\n\n signal = data_ws.readY(i)[0]\n error = data_ws.readE(i)[0]\n \n if signal != signal: #ignore NaN\n continue\n if ((error<=0) or (signal<=0)): # ignore Inf (0 in error are probably 0 in sign\n izerc+=1\n continue\n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i -- this what Libisis had\n weight = 1.0/error\n signal1_sum += signal * weight\n weight1_sum += weight \n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i^2\n weight2 = 1.0/(error*error)\n signal2_sum += signal * weight2\n weight2_sum += weight2 \n # Guess which assumes puassonian distribution with Err=Sqrt(signal) and calculates \n # the function: N_avrg = 1/(DetEfficiency_avrg^-1)*sum(n_i*DetEfficiency_i^-1)\n # where the DetEfficiency = WB_signal_i/WB_average WB_signal_i is the White Beam Vanadium \n # signal on i-th detector and the WB_average -- average WB vanadium signal. 
\n # n_i is the modified signal \n err_sq = error*error\n weight = err_sq/signal\n signal3_sum += err_sq\n weight3_sum += weight\n # Guess which estimatnes value sum(n_i^2/Sigma_i^2)/sum(n_i/Sigma_i^2) TGP suggestion from 12-2012\n signal4_sum += signal*signal/err_sq\n weight4_sum += signal/err_sq\n \n ic += 1 \n #print 'signal value =' ,signal\n #print 'error value =' ,error \n #print 'average ',signal_sum \n #---------------- Loop finished\n \n if( weight1_sum==0.0 or weight2_sum == 0.0 or weight3_sum == 0.0 or weight4_sum == 0.0) :\n print \"WB integral has been calculated incorrectrly, look at van_int workspace and input workspace: \",deltaE_wkspace\n raise IOError(\" divided by 0 weight\")\n \n integral_monovanLibISIS=signal1_sum / weight1_sum\n integral_monovanSigSq =signal2_sum / weight2_sum \n integral_monovanPuason =signal3_sum / weight3_sum \n integral_monovanTGP =signal4_sum / weight4_sum\n #integral_monovan=signal_sum /(wbVan_sum)\n van_multiplier = (float(reducer.van_rmm)/float(van_mass))\n absnorm_factorLibISIS = integral_monovanLibISIS * van_multiplier\n absnorm_factorSigSq = integral_monovanSigSq * van_multiplier \n absnorm_factorPuason = integral_monovanPuason * van_multiplier \n absnorm_factorTGP = integral_monovanTGP * van_multiplier \n #print 'Monovan integral :' ,integral_monovan \n \n if ei_monovan >= 210.0: \n xsection = 421 # vanadium cross-section in mBarn/sR (402 mBarn/Sr) (!!!modified to fit high energy limit?!!!)\n else: # old textbook cross-section for vanadium for ei=20mEv\n xsection = 400 + (ei_monovan/10) \n\n absnorm_factorLibISIS /= xsection\n absnorm_factorSigSq /= xsection \n absnorm_factorPuason /= xsection \n absnorm_factorTGP /= xsection \n \n sample_multiplier = (float(reducer.sample_mass)/float(reducer.sample_rmm))\n absnorm_factorLibISIS= absnorm_factorLibISIS *sample_multiplier\n absnorm_factorSigSq = absnorm_factorSigSq *sample_multiplier\n absnorm_factorPuason = absnorm_factorPuason *sample_multiplier\n absnorm_factorTGP = absnorm_factorTGP *sample_multiplier\n \n if (absnorm_factorLibISIS !=absnorm_factorLibISIS)|(izerc!=0): # It is an error, print diagnostics:\n if (absnorm_factorLibISIS !=absnorm_factorLibISIS):\n print '--------> Absolute normalization factor is NaN <----------------------------------------------'\n else:\n print '--------> Warning, Monovanadium has zero spectra <--------------------------------------------' \n print '--------> Processing workspace: ',deltaE_wkspace\n print '--------> Monovan Integration range : min=',min,' max=',max\n print '--------> Summarized: ',ic,' spectra with total value: ',signal2_sum, 'and total weight: ',weight2_sum\n print '--------> Dropped: ',izerc,' empty spectra'\n print '--------> Van multiplier: ',van_multiplier,' sample multiplier: ',sample_multiplier, 'and xsection: ',xsection \n print '--------> Abs norm factors: LibISIS: ',absnorm_factorLibISIS,' Sigma^2: ',absnorm_factorSigSq\n print '--------> Abs norm factors: Puasonian: ',absnorm_factorPuason, ' TGP: ',absnorm_factorTGP\n print '----------------------------------------------------------------------------------------------' \n else:\n DeleteWorkspace(Workspace=deltaE_wkspace)\n DeleteWorkspace(Workspace=data_ws)\n return (absnorm_factorLibISIS,absnorm_factorSigSq,absnorm_factorPuason,absnorm_factorTGP)", "def GetGraphicAverages(diagnostic_cases, diagnostic, weeks,year, n_years):\n t = 1.96\n\n current_year = Year.objects.get(year=year)\n weeks_current_year = weeks.filter(year=current_year)\n year_ob = 
Year.objects.filter(year__lt=year)\n weeks = weeks.filter(year__in=year_ob)\n\n popu = 0\n\n #cases per diagnostic\n diagnostic_cases_w = diagnostic_cases\n\n #arithmetic average of the weeks / n_years\n averages = [0] * 52\n\n standard_deviations = [0] * 52\n #number of years\n\n #cases per week of the diferent years\n cases_per_weeks = [0] * 52\n\n for i in range(len(averages)):\n\n f = [0]*(n_years)\n \n\n year = 0\n\n y_idx = 0\n for w in range(len(weeks)):\n #print(y)\n if weeks[w].week == i+1:\n \n if year != weeks[w].year: # Esto no pasa nunca\n year = weeks[w].year\n cases = 0\n \n \n for p in diagnostic_cases_w:\n\n if p.week == weeks[w]:\n \n cases += p.cases\n\n f[y_idx ] = cases\n y_idx +=1\n\n averages[i] = np.average(f) #borrar\n\n standard_deviations[i] = np.std(f)\n \n cases = 0\n for week in weeks_current_year:\n if week.week == i+1:\n dia = diagnostic_cases.filter(week=week)\n \n for d in dia:\n\n cases += d.cases\n\n cases_per_weeks[i] = cases \n\n\n #array of class dots for draw the chart of averages\n dots_graphic_averages = []\n #array of class dots for draw the chart of cumulative\n dots_graphic_cumulative = []\n\n\n average_cumulative = 0\n top_rank_cumulative = 0\n cases_acumulative = 0\n lower_rank_cumulative = 0\n\n for i in range(len(standard_deviations)):\n lower_rank = 0\n top_rank = 0\n\n if n_years != 0:\n lower_rank = averages[i] - (t * standard_deviations[i]/ math.sqrt(n_years))\n top_rank = averages[i] + (t * standard_deviations[i] / math.sqrt(n_years))\n if lower_rank < 0:\n lower_rank = 0\n\n # Acumulative dots\n cases_acumulative += cases_per_weeks[i]\n average_cumulative += averages[i]\n if lower_rank >= 0:\n lower_rank_cumulative += lower_rank\n top_rank_cumulative += top_rank\n\n dots_average = DotsGraphicAverage(averages[i],i+1, lower_rank, top_rank,cases_per_weeks[i])\n dots_cumulative = DotsGraphicAverage(average_cumulative,i+1, lower_rank_cumulative, top_rank_cumulative,cases_acumulative)\n dots_graphic_averages.append(dots_average)\n dots_graphic_cumulative.append(dots_cumulative)\n\n\n return dots_graphic_averages, dots_graphic_cumulative", "def _calculate_u_w_OLD(file_input, duration_steps, measurement_period, series_kind):\n # check(COL.START)\n ts = file_input.copy()\n # ts = ts.dropna()\n base_frequency = guess_freq(file_input.index) # DateOffset/Timedelta\n ts = ts.resample(base_frequency).sum()\n\n # ts = ts.asfreq(base_frequency)\n # ts = ts.fillna(0)\n\n # ------------------------------------------------------------------------------------------------------------------\n def _calc_overlapping_sum_max(event, duration):\n \"\"\"\n calculation of the maximum of the overlapping sum of the series\n acc. to DWA-A 531 chap. 4.2\n\n Args:\n event (pandas.Series): event with index=[start, end]\n duration (pandas.Timedelta): of the calculation step\n\n Returns:\n float: maximum of the overlapping sum\n \"\"\"\n data = ts.loc[event[COL.START]:event[COL.END]].copy()\n interval = int(round(duration / base_frequency))\n\n # correction factor acc. to DWA-A 531 chap. 
4.3\n improve = [1.140, 1.070, 1.040, 1.030]\n\n if interval == 1:\n return data.max() * improve[0]\n\n data = data.rolling(window=interval).sum()\n\n if interval > 4:\n return data.max()\n else:\n return data.max() * improve[interval - 1]\n\n # ------------------------------------------------------------------------------------------------------------------\n interim_results = pd.DataFrame(index=duration_steps, columns=['u', 'w'], dtype=float)\n interim_results.index.name = 'duration'\n\n # acc. to DWA-A 531 chap. 4.2:\n # The values must be independent of each other for the statistical evaluations.\n # estimated four hours acc. (SCHILLING 1984)\n # for larger durations - use the duration as minimal gap\n minimal_gap = pd.Timedelta(hours=4)\n # check('Events')\n events = rain_events(file_input, ignore_rain_below=0.0, min_gap=minimal_gap)\n # check(' - done')\n for duration_index in duration_steps:\n print(duration_index)\n duration = pd.Timedelta(minutes=duration_index)\n if duration > minimal_gap:\n events = rain_events(file_input, ignore_rain_below=0.0, min_gap=duration)\n\n # check('osum')\n events[COL.MAX_OVERLAPPING_SUM] = events.apply(_calc_overlapping_sum_max, axis=1, duration=duration)\n\n # check('series calc')\n if series_kind == ANNUAL:\n interim_results.loc[duration_index] = annual_series(events)\n elif series_kind == PARTIAL:\n interim_results.loc[duration_index] = partial_series(events, measurement_period)\n else:\n raise NotImplementedError\n # check(' - done')\n return interim_results", "def test_weekly_resolution_perfect_model(daily_initialized, daily_obs):\n weekly_pm = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_pm.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_pm, weekly_obs).all()", "def process_data_p1(data):\r\n return data[[\"CONTROL\", \"Academic Year\", \"MD_EARN_WNE_P10\"]] \\\r\n .groupby([\"CONTROL\", \"Academic Year\"], as_index=False).mean()", "def data_read(variable):\t\t\r\n\tdef day2datetime(scenario,days):\r\n\t\t\"\"\"\r\n\t\t# convert days from a reference into int datetime \r\n\t\t# do not take leap years into account\r\n\t\t\"\"\"\r\n\t\tdate_int = np.empty((len(days)));date_int[:]=np.nan\r\n\t\tif scenario =='T1970C': start_year =1970\r\n\t\telse: start_year =2010\r\n\t\tstart =(start_year*365)\r\n\t\tith=0\t\r\n\t\tfor iday in days:\r\n\t\t\tmonth_days =np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\t\t\tcalendar_days = np.array([0,31,59,90,120,151,181,212,243,273,304,334,365])\r\n\t\t\ttotal_days = int(iday) + start; \r\n\t\t\tyear = total_days//365; \r\n\t\t\tremainder = total_days%365\r\n\t\t\tif remainder ==0: year=year-1;month=12;day=31\r\n\t\t\telse: \r\n\t\t\t\tmonth = 1+[layer for layer in range(len(calendar_days)) if calendar_days[layer]< remainder and calendar_days[layer+1]>=remainder][0]\r\n\t\t\t\tday = int(remainder - calendar_days[month-1])\r\n\t\t\t\tif day == 0: day = month_days[month-1]\r\n\t\t\tdate_int[ith] = year*10000+month*100+day\r\n\t\t\tith=ith+1\r\n\t\treturn date_int.astype(int)\r\n\t\t\r\n\tdef mon_mean2annual_mean(scenario,time,data):\r\n\t\tannual_mean=np.empty((30,192,288));annual_mean[:]=np.nan\r\n\t\tcalendar_day = np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\t\tif scenario=='T1970RCP':\r\n\t\t\tyear_series = range(2020,2050)\r\n\t\telif scenario=='EdgEne':\r\n\t\t\tyear_series = range(2200,2230)\r\n\t\telif scenario=='Edg70GO':\r\n\t\t\tyear_series = range(2070,2100)\r\n\t\telse:\r\n\t\t\tyear_series = 
range(2130,2160)\r\n\t\tfor iyear in year_series:\r\n\t\t\t\r\n\t\t\tif (iyear == year_series[0] and time[0]//100 >= year_series[0] *100+1):\r\n\t\t\t\tlayer_b=0\r\n\t\t\telse:\r\n\t\t\t\tlayer_b = [layer for layer in range(len(time)) if time[layer]//100 == iyear*100+1][0] #June01\r\n\t\t\tif (iyear == year_series[-1] and time[-1]//100 <= year_series[-1] *100+12):\r\n\t\t\t\tlayer_e=-2\r\n\t\t\telse:\r\n\t\t\t\tlayer_e = [layer for layer in range(len(time)) if time[layer]//100 == iyear*100+12][0] #August 31\r\n\t\t\tdata_cache = data[layer_b:layer_e+1,:,:]\r\n\t\t\tannual_mean[iyear-year_series[0],:,:] = stats.nanmean(data_cache,axis=0)\r\n\t\treturn annual_mean\r\n\r\n\tdef data_netcdf(scenario,variable):\r\n\t\tinput_path ='/exports/csce/datastore/geos/users/s1667168/CESM_EDGAR/ModelOutput/FullCp/'\r\n\t\tvar_path = input_path+scenario+'/mon/atm/'+scenario+'.atm.mon.'+variable+'.nc'\r\n\t\t# print var_path\r\n\t\tnc_fid = nc4.Dataset(var_path,mode='r')\r\n\t\tlat = nc_fid.variables['lat'][:]\r\n\t\tlon = nc_fid.variables['lon'][:]\r\n\t\tdays = nc_fid.variables['time'][:]; time = day2datetime(scenario,days);#print time\r\n\t\tif variable ==\"VQ\" or variable == \"VT\":\r\n\t\t\tdata = np.nanmean(nc_fid.variables[variable][:,23:30,:,:],axis=1) # 850hpa\r\n\t\telse:\r\n\t\t\tdata = nc_fid.variables[variable][:]#-273.15\r\n\t\tnc_fid.close()\r\n\t\tvar40map = mon_mean2annual_mean(scenario,time,data)\r\n\t\treturn lon,lat,var40map\r\n\t\r\n\tlon,lat,Edg70GO = data_netcdf('Edg70GO',variable)\r\n\t_,_,T1970 = data_netcdf('T1970RCP',variable)\r\n\t_,_,EdgRef = data_netcdf('EdgRef',variable)\r\n\t_,_,Edg70Oz = data_netcdf('Edg70Oz',variable)\r\n\t_,_,EdgEne = data_netcdf('EdgEne',variable)\r\n\t_,_,EdgTech = data_netcdf('EdgTech',variable)\r\n\treturn lon,lat,T1970,Edg70GO,Edg70Oz,EdgRef,EdgEne,EdgTech", "def _compute_W():\n if penalty == \"consensus\":\n W = 1.0 * np.array(\n [[0, 1, 0, 1, 1],\n [0, 0, 1, 0, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 0, 0, 0],\n [1, 1, 1, 1, 0],\n [0, 0, 1, 0, 0]]\n )\n elif penalty in ['var', 'std']:\n W = np.empty((6, 5))\n for i, _ in enumerate(df_main.iterrows()):\n for j in range(5):\n vals = [df.iloc[i, j] for df in dfs]\n W[i, j] = np.std(vals)\n\n if penalty == 'var':\n W = W ** 2\n W = 1 / W\n else:\n W = np.ones((6, 5))\n\n return W / W.sum(axis=1).reshape((-1, 1))", "def norm_agg_payments(df):\n\n sum_cols = ['num_hcpcs',\n 'num_services',\n 'total_submitted_charges',\n 'total_medicare_allowed_amt',\n 'total_medicare_payment_amt',\n #'num_hcpcs_associated_drug_srvc',\n #'num_drug_srvc',\n #'num_unique_bene_with_drug_srvc',\n 'total_drug_submitted_charges', \n 'total_drug_medicare_allowed_amt',\n 'total_drug_medicare_payment_amt',\n 'num_hcpcs_associated_med_srvc',\n 'num_med_srvc', \n 'num_unique_bene_with_med_srvc', \n 'total_med_submitted_charges',\n 'total_med_medicare_allowed_amt', \n 'total_med_medicare_payment_amt',\n 'num_bene_le65',\n 'num_bene_65to74',\n 'num_bene_75to84',\n 'num_bene_ge84', \n 'num_female', \n 'num_male',\n 'num_non_his_white',\n 'num_african_american',\n 'num_asian',\n 'num_hispanic', \n 'num_american_indian', \n #'num_no_race',\n 'num_asthma', \n 'num_alzheimers_dementia',\n 'num_artrial_fibrillation',\n 'num_cancer',\n 'num_chronic_obstructive_pulmonary',\n 'num_depression',\n 'num_diabetes',\n 'num_heart_failure',\n 'num_hypertension',\n 'num_ischemic_heart',\n 'num_osteoporosis',\n 'num_rheumatoid_arthritis_osteoarthirtis',\n 'num_schizophrenia_psychotic',\n 'num_stroke',\n 'total_age',\n 'total_hcc_risk'\n ]\n for name in 
sum_cols:\n df['{}_norm'.format(name)] = df[name].divide(df['num_unique_bene'])", "def calc_raws(df, unit='leader'):\n\n df['distrust'] = df['HDIS']/(df['HDIS']+df['LDIS'])\n df['task'] = df['HTASK']/(df['HTASK']+df['LTASK'])\n df['bace'] = df['IC']/(df['IC']+df['EC'])\n df['igb'] = df['HBIAS']/(df['HBIAS']+df['LBIAS'])\n df['sc'] = df['HSC']/(df['HSC']+df['LSC'])\n df['cc'] = df['HCC']/(df['HCC']+df['LCC'])\n df['power'] = df['HPWR']/(df['HPWR']+df['LPWR'])\n\n df['i1'] = df.apply(i1_func, axis=1)\n df['i2'] = df.apply(i2_func, axis=1)\n df['i3'] = df.apply(i3_func, axis=1)\n\n df['i4a'] = df.apply(i4a_func, axis=1)\n df['i4b'] = df.apply(i4b_func, axis=1)\n df['i5ap'] = df.apply(i5ap_func, axis=1)\n df['i5pr'] = df.apply(i5pr_func, axis=1)\n df['i5re'] = df.apply(i5re_func, axis=1)\n df['i5op'] = df.apply(i5op_func, axis=1)\n df['i5th'] = df.apply(i5th_func, axis=1)\n df['i5pu'] = df.apply(i5pu_func, axis=1)\n\n df['p1'] = df.apply(p1_func, axis=1)\n df['p2'] = df.apply(p2_func, axis=1)\n df['p3'] = df.apply(p3_func, axis=1)\n df['p4'] = df.apply(p4_func, axis=1)\n df['p5'] = df.apply(p5_func, axis=1)\n\n if unit == 'leader':\n\n keep = ['firstname', 'lastname', 'name', 'Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n elif unit == 'year':\n\n keep = ['firstname', 'lastname', 'name', 'year', 'Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n elif unit == 'month':\n\n keep = ['firstname', 'lastname', 'name', 'yr_month','Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n df['year'] = df['yr_month'].apply(lambda x: x.split('-')[0])\n df['month'] = df['yr_month'].apply(lambda x: x.split('-')[1])\n\n elif unit == 'quarter':\n\n keep = ['firstname', 'lastname', 'name', 'yr_quarter', 'Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n df['year'] = df['yr_quarter'].apply(lambda x: x.split('-')[0])\n df['quarter'] = df['yr_quarter'].apply(lambda x: x.split('-')[1])\n\n return df", "def DW_cal(data, data_sm):\n n = len(data)\n numerator = 0\n denominator = 0\n for i in range(n):\n if i == 0:\n numerator = numerator + 0\n else:\n numerator = numerator + ((data[i] - data_sm[i]) - (data[i-1] - data_sm[i-1]))**2\n denominator = denominator + (data[i] - data_sm[i])**2\n return numerator/denominator*n/(n - 1)", "def calculate_clim_anoms(var, var_dates):\n d_counts=[]\n var_clim = np.zeros_like(var)\n var_climstd = np.zeros_like(var)\n for m in range(1,13): #for each month\n mo_ind = (var_dates[1,:]==m)\n day_options = np.unique(var_dates[2,mo_ind])\n \n #print(day_options) #for diagnostics \n for d in range(0,np.size(day_options)): #for each possible day\n d_ind = (mo_ind) & (var_dates[2,:]==day_options[d])\n\n var_days = var[:,:,d_ind]\n var_daysav = np.nanmean(var_days,2)\n var_daysstd = np.nanstd(var_days,2)\n \n var_clim[:,:,d_ind] = np.transpose(np.tile(var_daysav,(np.sum(d_ind),1,1)),(1,2,0))\n var_climstd[:,:,d_ind] = 
np.transpose(np.tile(var_daysstd,(np.sum(d_ind),1,1)),(1,2,0))\n \n d_counts.append(np.sum(d_ind)) #this is just for diagnostics\n \n var_anom = var - var_clim\n var_anom_scaled = var_anom/var_climstd\n \n return var_anom, var_anom_scaled;", "def annual_summary(self):\n \n #Initialize dict with info about all of year's storms\n hurdat_year = {'id':[],'operational_id':[],'name':[],'max_wspd':[],'min_mslp':[],'category':[],'ace':[]}\n \n #Search for corresponding entry in keys\n count_ss_pure = 0\n count_ss_partial = 0\n iterate_id = 1\n for key in self.dict.keys():\n\n #Retrieve info about storm\n temp_name = self.dict[key]['name']\n temp_vmax = np.array(self.dict[key]['vmax'])\n temp_mslp = np.array(self.dict[key]['mslp'])\n temp_type = np.array(self.dict[key]['type'])\n temp_time = np.array(self.dict[key]['date'])\n temp_ace = self.dict[key]['ace']\n\n #Get indices of all tropical/subtropical time steps\n idx = np.where((temp_type == 'SS') | (temp_type == 'SD') | (temp_type == 'TD') | (temp_type == 'TS') | (temp_type == 'HU'))\n\n #Get times during existence of trop/subtrop storms\n if len(idx[0]) == 0: continue\n trop_time = temp_time[idx]\n if 'season_start' not in hurdat_year.keys():\n hurdat_year['season_start'] = trop_time[0]\n hurdat_year['season_end'] = trop_time[-1]\n\n #Get max/min values and check for nan's\n np_wnd = np.array(temp_vmax[idx])\n np_slp = np.array(temp_mslp[idx])\n if len(np_wnd[~np.isnan(np_wnd)]) == 0:\n max_wnd = np.nan\n max_cat = -1\n else:\n max_wnd = int(np.nanmax(temp_vmax[idx]))\n max_cat = convert_category(np.nanmax(temp_vmax[idx]))\n if len(np_slp[~np.isnan(np_slp)]) == 0:\n min_slp = np.nan\n else:\n min_slp = int(np.nanmin(temp_mslp[idx]))\n\n #Append to dict\n hurdat_year['id'].append(key)\n hurdat_year['name'].append(temp_name)\n hurdat_year['max_wspd'].append(max_wnd)\n hurdat_year['min_mslp'].append(min_slp)\n hurdat_year['category'].append(max_cat)\n hurdat_year['ace'].append(temp_ace)\n hurdat_year['operational_id'].append(self.dict[key]['operational_id'])\n \n #Handle operational vs. 
non-operational storms\n\n #Check for purely subtropical storms\n if 'SS' in temp_type and True not in np.isin(temp_type,['TD','TS','HU']):\n count_ss_pure += 1\n\n #Check for partially subtropical storms\n if 'SS' in temp_type:\n count_ss_partial += 1\n\n #Add generic season info\n hurdat_year['season_storms'] = len(hurdat_year['name'])\n narray = np.array(hurdat_year['max_wspd'])\n narray = narray[~np.isnan(narray)]\n hurdat_year['season_named'] = len(narray[narray>=34])\n hurdat_year['season_hurricane'] = len(narray[narray>=65])\n hurdat_year['season_major'] = len(narray[narray>=100])\n hurdat_year['season_ace'] = np.sum(hurdat_year['ace'])\n hurdat_year['season_subtrop_pure'] = count_ss_pure\n hurdat_year['season_subtrop_partial'] = count_ss_partial\n \n #Return object\n return hurdat_year", "def compute_dstats(data, icol=0, nmin=None, nmax=None, normed=False):\n dims = len(data.shape)\n records = data[:, icol] if dims > 1 else data\n _, data_stats = np.unique(records, return_counts=True)\n # the distribution of the statistics\n stats, counts = np.unique(data_stats, return_counts=True)\n del data_stats\n out = pd.Series(data=counts, index=stats)\n if normed:\n out = out / out.sum()\n return out.loc[nmin:nmax]", "def normalize_weather_data(self, weather_data):\n\n feat_means = np.array([self.summary[field][0] for field in self.weather_fields])\n feat_stds = np.array([self.summary[field][1] for field in self.weather_fields])\n\n # Standardize\n weather_standardized = (weather_data - feat_means) / feat_stds\n\n return weather_standardized", "def normalise_single_iso(data, iso, lookup_table):\n normalisation_factor = lookup_table[1][iso]\n new_data = data / normalisation_factor\n return new_data", "def _unnormalized_transform(self):\n return self.n_ds + self.doc_sentiment_prior_", "def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = 
np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models", "def marcels_players(goalie, date, df):\n # 0 = that year, 1 is year b4 ....\n marcel_weights = [.36, .29, .21, .14]\n reg_const = 2000\n reg_avg = 0 # Where to regress to\n\n # Use past 3 season to weight games played -> Just take weighted average\n gp_weights = [8, 4, 2, 0]\n\n season = int(helpers.get_season(date))\n\n weighted_goals_sum, weighted_fen_sum, weighted_xg_sum, weights_marcel_sum = 0, 0, 0, 0\n weighted_gp_sum, weights_gp_sum = 0, 0\n\n # Past 4 Seasons\n for i in range(0, 4):\n if season - i > 2006:\n # Subset from stats df\n df_goalie = df[(df['player'] == goalie) & (df['season'] == (season - i))]\n\n # Sanity Check\n if df_goalie.shape[0] > 1:\n print(\"Too many rows!!!!!!!\")\n exit()\n\n # If he played that year\n if not df_goalie.empty:\n weighted_goals_sum += df_goalie.iloc[0]['goals_a'] * marcel_weights[i]\n weighted_fen_sum += df_goalie.iloc[0]['fenwick_a'] * marcel_weights[i]\n weighted_xg_sum += df_goalie.iloc[0]['xg_a'] * marcel_weights[i]\n weighted_gp_sum += df_goalie.iloc[0]['games'] * gp_weights[i]\n\n # -> To divide by at end...normalize everything\n weights_marcel_sum += marcel_weights[i]\n weights_gp_sum += gp_weights[i]\n\n # Normalize weighted sums\n weighted_xg_sum = weighted_xg_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_goals_sum = weighted_goals_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_fen_sum = weighted_fen_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n\n # Get Regressed fsv%\n if weighted_fen_sum != 0:\n weighted_adj_fsv = ((1 - weighted_goals_sum / weighted_fen_sum) - (1 - weighted_xg_sum / weighted_fen_sum)) * 100\n else:\n weighted_adj_fsv = 0\n reg_adj_fsv = weighted_adj_fsv - ((weighted_adj_fsv - reg_avg) * (reg_const / (reg_const + weighted_fen_sum)))\n\n # Get weighted gp\n weighted_gp_sum = weighted_gp_sum / weights_gp_sum if weights_gp_sum != 0 else 0\n\n return {'fsv': reg_adj_fsv, 'gp': weighted_gp_sum}", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def normalize(row):\n study = row['study']\n val = row[key]\n group_mean = df.groupby('study').mean().loc[study,key]\n group_std = df.groupby('study').std().loc[study,key]\n zval = (val - group_mean) / group_std\n return zval", "def test_weekly_resolution_hindcast(daily_initialized, daily_obs):\n weekly_hindcast = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_hindcast.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_hindcast, weekly_obs).all()", "def calculate_u_w(file_input, duration_steps, measurement_period, series_kind):\n ts = file_input.copy()\n base_frequency = guess_freq(file_input.index) # DateOffset/Timedelta\n\n # ------------------------------------------------------------------------------------------------------------------\n interim_results = dict()\n\n # -------------------------------\n # acc. to DWA-A 531 chap. 
4.2:\n # The values must be independent of each other for the statistical evaluations.\n # estimated four hours acc. (SCHILLING 1984)\n # for larger durations - use the duration as minimal gap\n #\n # use only duration for splitting events\n # may increase design-rain-height of smaller durations\n #\n # -------------------------------\n\n for duration_integer in duration_steps:\n duration = pd.Timedelta(minutes=duration_integer)\n\n if duration < pd.Timedelta(base_frequency):\n continue\n\n events = rain_events(file_input, min_gap=duration)\n\n # correction factor acc. to DWA-A 531 chap. 4.3\n improve = _improve_factor(duration / base_frequency)\n\n events[COL.MAX_OVERLAPPING_SUM] = agg_events(events, ts.rolling(duration).sum(), 'max') * improve\n\n if series_kind == ANNUAL:\n interim_results[duration_integer] = annual_series(events)\n elif series_kind == PARTIAL:\n interim_results[duration_integer] = partial_series(events, measurement_period)\n else:\n raise NotImplementedError\n\n # -------------------------------\n interim_results = pd.DataFrame.from_dict(interim_results, orient='index')\n interim_results.index.name = 'duration'\n return interim_results", "def data_averaging_and_cleaning(self):\n groups, film, plank = self.data_grouping()\n\n for i in groups:\n self.organized_names.append(input('Enter label name for condition ' + str(i)))\n\n self.organized_film.append(sum(film[groups.index(i)]) / len(film[groups.index(i)]))\n try:\n self.organized_plank.append(sum(film[groups.index(i)]) / (sum(film[groups.index(i)]) +\n sum(plank[groups.index(i)])))\n except ZeroDivisionError:\n self.organized_plank.append(sum(film[groups.index(i)]) / 1)", "def unicef_data():\n workbook = xlrd.open_workbook('unicef_oct_2014.xlsx')\n sheet = workbook.sheets()[0]\n\n title_rows = zip(sheet.row_values(4), sheet.row_values(5))\n titles = [t[0] + ' ' + t[1] for t in title_rows]\n titles = [t.strip() for t in titles]\n\n country_rows = [sheet.row_values(r) for r in range(6, 114)]\n cleaned_rows = []\n\n for row in country_rows:\n cleaned_row = [remove_bad_chars(rv) for rv in row]\n cleaned_rows.append(cleaned_row)\n\n example_row = sheet.row(6)\n types = get_types(example_row)\n\n table = agate.Table(cleaned_rows, titles, types)\n ranked = table.compute([('Total Child Labor Rank',\n agate.Rank('Total (%)', reverse=True)), ])\n\n return ranked", "def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)", "def _normalise(group, clim_group):\n month = group[time_name].dt.month.values[0]\n months, _ = zip(*list(clim_group))\n clim_group_month = list(clim_group)[months.index(month)][1]\n return (group - clim_group_month.mean(time_name)) / clim_group_month.std(time_name)", "def mann_whitney_plus_means(turnstile_weather):\n with_rain = turnstile_weather[turnstile_weather.rain == 1]\n without_rain = turnstile_weather[turnstile_weather.rain == 0]\n\n with_rain_mean = with_rain['ENTRIESn_hourly'].mean()\n without_rain_mean = without_rain['ENTRIESn_hourly'].mean()\n U, p = scipy.stats.mannwhitneyu(with_rain['ENTRIESn_hourly'], without_rain['ENTRIESn_hourly'])\n\n 
return with_rain_mean, without_rain_mean, U, p", "def normalise(da):\n return (da - da.min()) / (da.max() - da.min())", "def weekly(evictiondata):\r\n evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in evictions_per_week.keys():\r\n evictions_per_week[row['week_date']] = row['filings_2020']\r\n else:\r\n evictions_per_week[row['week_date']] += row['filings_2020']\r\n return evictions_per_week" ]
[ "0.57838684", "0.5630602", "0.53852296", "0.5338406", "0.5294488", "0.5282174", "0.5200791", "0.51800346", "0.5177649", "0.5170467", "0.5109907", "0.51097554", "0.5109729", "0.51078874", "0.51069164", "0.5098266", "0.5095416", "0.5071187", "0.50578314", "0.5039682", "0.50048625", "0.500447", "0.5002896", "0.49938023", "0.49935907", "0.4992634", "0.4989269", "0.49882802", "0.49875954", "0.49758467" ]
0.635556
0
Saves our custom form fields in the event.
def _save_extra_fields(self, event): term = self.cleaned_data["term_name"] week = self.cleaned_data["term_week"] day = self.cleaned_data["day_of_week"] year = int(settings.DEFAULT_ACADEMIC_YEAR) date = datetimes.termweek_to_date(year, term, week, day) start_hour = self.cleaned_data["start_hour"] start_minute = self.cleaned_data["start_minute"] end_hour = self.cleaned_data["end_hour"] end_minute = self.cleaned_data["end_minute"] tz = timezone.get_current_timezone() start_naive = datetime.datetime(date.year, date.month, date.day, start_hour, start_minute) event.start = tz.localize(start_naive) end_naive = datetime.datetime(date.year, date.month, date.day, end_hour, end_minute) event.end = tz.localize(end_naive) event.metadata["people"] = self.cleaned_data["people"] event.metadata["type"] = self.cleaned_data["event_type"] if self.cleaned_data["cancel"] is True: event.status = models.Event.STATUS_CANCELLED else: event.status = models.Event.STATUS_LIVE
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_post(self):\n return \"Ok, the stuff is being saved\"", "def post_save(self, request, instance, instance_data, created): # NOQA: C901\n # import wdb; wdb.set_trace()\n if \"appendix_table\" in instance_data:\n for data in instance_data[\"appendix_table\"]:\n self._save_sub_form(\n Appendix, AppendixForm, data, visit_report_instance=instance\n )\n\n if \"faces\" in instance_data:\n for data in instance_data[\"faces\"]:\n self._save_sub_form(\n Face, FaceForm, data, visit_report_instance=instance\n )\n\n if \"steps\" in instance_data:\n for data in instance_data[\"steps\"]:\n self._save_sub_form(\n Step, StepForm, data, visit_report_instance=instance\n )\n\n if \"scenarios\" in instance_data:\n for data in instance_data[\"scenarios\"]:\n sub_instance = self._save_sub_form(\n Scenario, ScenarioForm, data, visit_report_instance=instance\n )\n\n if \"financial_aids\" in data:\n for sub_data in data[\"financial_aids\"]:\n self._save_sub_form(\n FinancialAid,\n FinancialAidForm,\n sub_data,\n scenario_instance=sub_instance,\n )\n\n if \"financings\" in data:\n for sub_data in data[\"financings\"]:\n self._save_sub_form(\n Financing,\n FinancingForm,\n sub_data,\n scenario_instance=sub_instance,\n )\n\n if \"scenario_summaries\" in data:\n for sub_data in data[\"scenario_summaries\"]:\n self._save_sub_form(\n ScenarioSummary,\n ScenarioSummaryForm,\n sub_data,\n scenario_instance=sub_instance,\n )\n\n if \"systems\" in instance_data:\n for data in instance_data[\"systems\"]:\n self._save_sub_form(\n System, SystemForm, data, visit_report_instance=instance\n )\n\n if \"work_recommendations\" in instance_data:\n for data in instance_data[\"work_recommendations\"]:\n self._save_sub_form(\n WorkRecommendation,\n WorkRecommendationForm,\n data,\n visit_report_instance=instance,\n )\n\n return", "def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing", "def OnSave(self, event):\r\n if self.filename == None:\r\n self.OnSaveAs(event)\r\n else:\r\n self.core.Save(self.filename)", "def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)", "def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)", "def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)", "def save(self, *args, **kwargs):\n pass", "def save(self, *args, **kwargs):\n return", "def save(self, commit=True):\r\n event = super(RPEventCreateForm, self).save(commit)\r\n event.add_host(self.owner, main_host=True)\r\n hosts = self.cleaned_data.get(\"hosts\", [])\r\n for host in hosts:\r\n # prevent owner from being downgraded to normal host if they were added\r\n if host != self.owner:\r\n event.add_host(host)\r\n gms = self.cleaned_data.get(\"gms\", [])\r\n for gm in gms:\r\n event.add_gm(gm)\r\n for guest in self.cleaned_data.get(\"invites\", []):\r\n if guest in hosts or guest in gms or guest == self.owner:\r\n continue\r\n event.add_guest(guest)\r\n for org in self.cleaned_data.get(\"org_invites\", []):\r\n 
event.invite_org(org)\r\n plot = self.cleaned_data.get(\"plot\", None)\r\n if plot:\r\n # we create a blank PlotUpdate so that this is tagged to the Plot, but nothing has happened yet\r\n event.beat = plot.updates.create()\r\n event.save()\r\n self.pay_costs()\r\n self.post_event(event)\r\n return event", "def save_related(self, request, form, formsets, change):\n pass", "def save(self, event):\n self.saved_events.append(event)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)", "def save(self, handler, name):", "def save_field(request):\n try:\n # Get BPO Field based on type.\n bpo_field_id = request.POST.get('field_id')\n bpo_field = _GetField(bpo_field_id)\n # Assign new value.\n if request.POST.get('value'):\n bpo_field.value = request.POST.get('value')\n if request.POST.get('label'):\n bpo_field.label = request.POST.get('label')\n if request.POST.get('sub_fields'):\n bpo_field.sub_fields = json.loads(request.POST.get('sub_fields'))\n if request.POST.get('size'):\n new_size = int(request.POST.get('size'))\n if BPOField.SIZE_RANGE[0] <= new_size <= BPOField.SIZE_RANGE[1]:\n bpo_field.size = new_size\n if request.POST.get('label_size'):\n new_size = int(request.POST.get('label_size'))\n if BPOField.SIZE_RANGE[0] <= new_size <= BPOField.SIZE_RANGE[1]:\n bpo_field.label_size = new_size\n # Save object.\n bpo_field.save()\n except Exception as e:\n return HttpResponseBadRequest(e)\n else:\n return HttpResponse(json.dumps({'status': 'success'}), mimetype=\"application/x-javascript\")", "def save(self):\n\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self):\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def save(self, *args, **kwargs) -> None:\n pass", "def form_valid(self, form):\n form.save()\n return super().form_valid(form)", "def save (self):\n pass", "def save(self, **with_extra):\n\t\tif self.id:\n\t\t\tnew_fields = getattr(self._client, \"save_\" + self.method)(self, **with_extra)\n\t\t\tself._create_fields(new_fields)\n\t\t\treturn True\n\t\treturn False", "def save_model(self, request, instance, form, change):\n pass" ]
[ "0.6323458", "0.6105032", "0.60823196", "0.6060079", "0.59963053", "0.59963053", "0.59963053", "0.59842247", "0.5925766", "0.59208643", "0.592074", "0.59128934", "0.5910204", "0.5910204", "0.58927155", "0.58364564", "0.5804914", "0.57820785", "0.57811224", "0.57811224", "0.57811224", "0.57811224", "0.57811224", "0.5780534", "0.5780534", "0.5780534", "0.57658195", "0.57578415", "0.57562727", "0.5753393" ]
0.705376
0
Return datetime.datetime instance from any filename as tryydoyhhmmss.hld.root.root or styydoyhhmmss.hld.root.root
def date_from_filename(filename: str) -> datetime.datetime: if not filename.startswith(("st", "tr")) or not filename.endswith(".hld.root.root"): raise Exception("Filename must be like tryydoyhhmmss.hld.root.root " "or styydoyhhmmss.hld.root.root") yy = int(f"20{filename[2:4]}") doy = int(filename[4:7]) hh = int(filename[7:9]) mm = int(filename[9:11]) ss = int(filename[11:13]) return datetime.datetime.combine( datetime.date(yy, 1, 1) + datetime.timedelta(doy + 1), datetime.time(hour=hh, minute=mm, second=ss) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_datetime(filename) -> datetime:\n date_part = filename[-26:-7]\n return datetime.strptime(date_part, '%Y-%m-%d_%H-%M-%S')", "def extract_datetime(fpath):\n try:\n handle = open(fpath, 'rb')\n if hexlify(handle.read(2)) != hexlify(u'MZ'):\n handle.close()\n return\n except:\n return\n\n try:\n handle.seek(60, 0)\n offset = handle.read(4)\n offset = hexlify(offset[::-1])\n\n if offset == '':\n handle.close()\n return\n\n offset = int(offset, 16)\n handle.seek(offset+8, 0)\n dword = handle.read(4)\n handle.close()\n\n t = unpack(\">L\", dword[::-1])[0]\n except:\n return\n return datetime.datetime.fromtimestamp(t)", "def date_from_filename(filename):\n date_string = filename.split('.')[0] + '+0000'\n return datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S%z')", "def _date_from_filename(filename):\n time = os.path.basename(filename).split('_')[0]\n return datetime.date(int(time[0:4]),\n int(time[4:6]),\n int(time[6:]))", "def get_date_from_filename(filepath):\n print \"debug: %s\" % filepath\n fname = os.path.basename(filepath)\n print \"debug: %s\" % fname\n date = fname.split(\"_\")[0]\n if len(date) != 8:\n print \"debug: %s\" % date\n print \"debug: date len != 8\"\n return None\n try:\n int(date) # will raise an exception if not all digits\n if date[0:2] != '20':\n return None\n dateStr = \"%s %s %s\" % (date[0:4],date[4:6],date[6:8])\n retval = time.strptime(dateStr, \"%Y %m %d\")\n return retval\n except:\n traceback.print_exc()\n return None", "def creation_date_from_path(filename):\n try:\n return _find_datetime(os.path.basename(filename))\n except ValueError:\n pass\n\n try:\n return _find_date(os.path.basename(filename)[:10])\n except ValueError:\n pass\n\n try:\n return _find_date(os.path.basename(filename))\n except ValueError:\n pass\n\n try:\n return _find_date(os.path.dirname(filename))\n except ValueError:\n pass\n\n raise ValueError(f\"No date found in path for {filename}\")", "def date_from_filename(filename):\n\n if filename.startswith(\"Hearthstone Screenshot\"):\n # eg. Hearthstone Screenshot 01-15-17 17.27.24.png\n date_list = filename[23:31].split('-')\n date_list[2] = '20' + date_list[2] # 15->2015\n else: # underscored version pre mid 2015\n # eg. 
Hearthstone_Screenshot_1.3.2014.20.16.36.png\n date_list = filename[23:-13].split('.')\n if len(date_list[0]) == 1:\n date_list[0] = '0' + date_list[0]\n if len(date_list[1]) == 1:\n date_list[1] = '0' + date_list[1]\n\n time_list = filename[-12:-4].split('.')\n date_list[0], date_list[1] = date_list[1], date_list[0] # american->english date\n date_list.reverse()\n datetime = '/'.join([*date_list, *time_list])\n return datetime", "def get_date_from_filename(file_path):\n file_name = basename(file_path)\n name, _ = splitext(file_name)\n _, date = name.split('_')\n\n return date", "def get_datetime(name, time_fmt, path='.') -> datetime:\n try:\n return datetime.strptime(name.split('_', 1)[0], time_fmt)\n except ValueError:\n return datetime.fromtimestamp(os.path.getmtime(os.path.join(path, name)))", "def get_file_date(self, file: str) -> date:", "def get_timestamp_from_path(file_path):\n return int(file_path.split('_')[1].split('.')[0])", "def filename2date(filename):\r\n # Find the '-SC' in the filename.\r\n dash = filename.find('-SC')\r\n if dash:\r\n return datetime.datetime.strptime(filename[dash-7:dash], '%Y%j')\r\n else:\r\n raise ValueError('Landsat filename does not conform to expected format.')", "def file_creation_date(file_path):\n # Must be a valid path string\n assert os.path.isfile(file_path) is True\n\n unix_timestamp = os.path.getctime(file_path)\n\n return datetime.fromtimestamp(unix_timestamp)", "def parseTimeFromFilename(name, dataset):\r\n if dataset.lower() in ['zandmotor']:\r\n date = map(int, name[name.rfind('/')+1:-4].split('_'))\r\n return reader.daySinceEpoch(date[0], date[1], date[2])\r\n elif dataset.lower() in ['coastline']:\r\n return int(name[name.rfind('/')+1:name.rfind('/')+5])", "def parse_filename(filename, ext='.md'):\n filename, extension = os.path.splitext(filename)\n date, file = filename[:10], filename[11:]\n\n if extension == ext:\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except Exception:\n print('The date prefix for {} is INVALID'.format(filename))\n return [None, None]\n\n return [date, file]\n return [None, None]", "def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt", "def file_name_to_date(prediction_file_name):\n\n error_checking.assert_is_string(prediction_file_name)\n pathless_file_name = os.path.split(prediction_file_name)[-1]\n\n valid_date_string = pathless_file_name.split('.')[0].split('_')[1]\n _ = time_conversion.string_to_unix_sec(valid_date_string, DATE_FORMAT)\n\n return valid_date_string", "def file_name_to_date(saliency_file_name):\n\n error_checking.assert_is_string(saliency_file_name)\n\n pathless_file_name = os.path.split(saliency_file_name)[-1]\n valid_date_string = pathless_file_name.split('_')[1]\n _ = time_conversion.string_to_unix_sec(valid_date_string, DATE_FORMAT)\n\n return valid_date_string", "def get_seviri_file_time(file):\n if hasattr(file, '__iter__'):\n filenames = [f.split('/')[-1] for f in file]\n date = [datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50])) for f in filenames]\n else:\n f = file.split('/')[-1]\n date = datetime(int(f[38:42]), int(f[42:44]),\n int(f[44:46]), int(f[46:48]),\n int(f[48:50]))\n return date", "def creation_date(path_to_file, 
return_datetime=True):\n if platform.system() == 'Windows':\n created_at = os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n created_at = stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n created_at = stat.st_mtime\n\n if return_datetime:\n return datetime.fromtimestamp(created_at)\n else:\n return created_at", "def parse_dt_from_logfile_name(key):\n ### Check file date by regular expression\n keydate = re.search(\"([0-9]{4}[0-9]{2}[0-9]{2})\", key).group(1)\n \n key_dt = datetime.strptime(keydate, '%Y%m%d')\n return key_dt", "def file_get_mdatetime(filename):\n return datetime.datetime.utcfromtimestamp(os.path.getmtime(filename))", "def guess_file_date_format(filename):\n for line in open(filename):\n try:\n format = guess_format(line)\n except CannotParse:\n pass\n else:\n return format\n\n raise CannotParse(\"No date/time strings found in '%s'\" % filename)", "def parse_filename(filename): # , time_fmt=TIME_INFILE_FMT):\n # Split the name up into its \"blocks\"\n parts = filename.split(\"_\")\n hive_str, rpi_str = parts[1:3]\n day_str = parts[3]\n method = parts[5]\n\n # Parse Hive and RPi number\n hive = int(hive_str[-1])\n rpi = int(rpi_str[-1])\n method = method.strip(\".csv\")\n\n # # Parse timestring into a datetime object\n # dt_naive = datetime.strptime(t_str, time_fmt)\n # dt_utc = pytz.utc.localize(dt_naive)\n\n return hive, rpi, method, day_str", "def extract_obstime_from_name(filename, tz=13):\n name = filename.split(\"/\")[-1]\n datebits = name.split(\"--\")\n (Y,M,D) = datebits[0].split(\"-\")\n (h,m,s) = datebits[1].split(\"-\")\n ms = datebits[2].split(\".\")[0]\n tz = TimezoneInfo(utc_offset=tz*u.hour)\n t = datetime(int(Y), int(M), int(D), int(h), int(m), int(s), 1000*int(ms), tzinfo=tz)\n obstime = Time(t)\n obstime.format = 'unix'\n return obstime", "def get_date_input_file(file: str) -> str:\n # check format\n if not match_input_files(file):\n raise Exception(\"Not valid input file format\")\n\n else:\n date = result = re.search(r\"input_(.*)\\.feather\", file)\n return date.group(1)", "def __parseDailyFilename(self, f):\n base = os.path.basename(f)\n\n tokens = base.split('.')\n if len(tokens) < 6:\n # assume it's an old file in the format A2000089etcetc.tif i.e. 
?YYYYDDD*\n yr = base[1:5]\n day = base[5:8]\n else:\n # assume it's a file in the newer format ?*.YYYY.DDD.etc format\n varname, yr, day, temporalSummary, res, spatialSummary = tokens[0:6]\n outTemplate = varname + \"{}.{}.{}.\" + \"{}.{}.{}.tif\".format(temporalSummary, res, spatialSummary)\n if self._outTemplate == \"FILLED-OUTPUT{}.{}.{}.TemporalSummary.Res.SpatialSummary.tif\":\n self._outTemplate = outTemplate\n else:\n assert self._outTemplate == outTemplate\n return day, yr", "def _read_antti_datetime(dt_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if dt_file.split('.')[-1] == 'gz':\n ff = gzip.open(dt_file, 'r')\n else:\n ff = open(dt_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n ymdHMS = np.genfromtxt(sIO, comments=\"%\")\n DT = np.array([dt.datetime(*elem) for elem in ymdHMS.astype('int')])\n sIO.close()\n\n return DT", "def _creation_date(path_to_file):\n if platform.system() == \"Windows\":\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def creation_date(file_path):\n date = None\n if platform.system() == 'Windows':\n try:\n date = os.path.getctime(file_path)\n except FileNotFoundError:\n print('FileNotFoundError: ' + file_path)\n UTILITIES_LOGGER.error('FileNotFoundError: ' + file_path)\n else:\n try:\n stat = os.stat(file_path)\n try:\n date = stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n date = stat.st_mtime\n except FileNotFoundError as e:\n print('FileNotFoundError: ' + file_path)\n UTILITIES_LOGGER.exception('FileNotFoundError: ' + file_path)\n # return datetime.datetime.fromtimestamp(int(date)).strftime('%Y-%m-%d %H:%M:%S')\n # Got error: OverflowError: timestamp out of range for platform time_t\n # When manually testing with existing files from Windows. Probably not an issue in real\n # usage, but fixed anyway with datetime.datetime.utcfromtimestamp().\n # See: https://stackoverflow.com/questions/3682748/converting-unix-timestamp-string-to-readable-date#comment30046351_3682808\n try:\n return datetime.datetime.utcfromtimestamp(int(date)).strftime('%Y-%m-%d %H:%M:%S')\n except OverflowError as e:\n # Getting OverflowError when testing with some files, not sure of root cause\n # This perhaps is only a problem when the file is read on creation when copied to sesison directory.\n # It gets read a second time when modified. Need to wait for the copy to finish?\n print('OverflowError: date value: ' + str(date) + ' for file:' + file_path)\n UTILITIES_LOGGER.exception('OverflowError: date:' + str(date) + ' file:' + file_path)\n return None" ]
[ "0.7622054", "0.7068473", "0.7055876", "0.7017632", "0.7011796", "0.6778424", "0.65450853", "0.6536547", "0.6466029", "0.64422166", "0.6317801", "0.62757915", "0.61950606", "0.615876", "0.61342496", "0.6119123", "0.6091253", "0.60516626", "0.60343385", "0.6009402", "0.5997254", "0.59617895", "0.59547573", "0.59127384", "0.5908232", "0.58138514", "0.58090204", "0.5802795", "0.57952935", "0.57839227" ]
0.7839974
0
Sets the created_by_id of this BigqueryConnection.
def created_by_id(self, created_by_id):
    self._created_by_id = created_by_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def created_by_id(self, created_by_id):\n self._created_by_id = created_by_id", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by_security_user_id(self, created_by_security_user_id):\n\n self._created_by_security_user_id = created_by_security_user_id", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by(self, created_by: \"str\"):\n self._attrs[\"createdBy\"] = created_by", "def created_by_id(self) -> str:\n return self.__created_by_id", "def created_by_id(self):\n return self._created_by_id", "def created_user(self, created_user):\n self._created_user = created_user", "def created_by(self):\n return self._created_by", "def id_user(self, id_user):\n\n self._id_user = id_user", "def created_by_user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"created_by_user_id\")", "def created_by(self):\n if \"createdBy\" in self._prop_dict:\n if isinstance(self._prop_dict[\"createdBy\"], OneDriveObjectBase):\n return self._prop_dict[\"createdBy\"]\n else :\n self._prop_dict[\"createdBy\"] = IdentitySet(self._prop_dict[\"createdBy\"])\n return self._prop_dict[\"createdBy\"]\n\n return None", "def sent_by_user_id(self, sent_by_user_id):\n\n self._sent_by_user_id = sent_by_user_id", "def created_by(self) -> str:\n return pulumi.get(self, \"created_by\")", "def created_by_user_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"created_by_user_id\")", "def set_userId(self, userId):\n self.authentication.userId = userId", "def created_by(self, is_updated=False):\n if not isinstance(self.json, dict):\n return\n if self.request.method == constants.PUT:\n is_updated = True\n\n created_by = 'system'\n if hasattr(self, 'user'):\n if self.user:\n created_by = self.user.email\n\n elif hasattr(self, 'google_user'):\n if self.google_user:\n created_by = self.google_user.get('email', 'system')\n\n key = 'created_by'\n if is_updated:\n key = 'updated_by'\n logging.info('{} is {}'.format(key, created_by))\n self.json[key] = created_by", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id" ]
[ "0.7827399", "0.7238225", "0.7238225", "0.7238225", "0.7238225", "0.7238225", "0.7238225", "0.6770423", "0.6478218", "0.6478218", "0.6478218", "0.6478218", "0.61777014", "0.6108819", "0.58232164", "0.5540735", "0.5509084", "0.5416939", "0.5345631", "0.52725977", "0.52501637", "0.5222347", "0.52173007", "0.5139465", "0.5118541", "0.5118541", "0.5118541", "0.5118541", "0.5118541", "0.5118541" ]
0.78331757
0
Sets the type of this BigqueryConnection.
def type(self, type):
    allowed_values = ["postgres", "redshift", "snowflake", "bigquery"]  # noqa: E501
    if type not in allowed_values:
        raise ValueError(
            "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
            .format(type, allowed_values)
        )

    self._type = type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setDataSetType(self, type):\n self.__data_set_type__ = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self._type = type", "def type(self, type: str):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.66367114", "0.6553918", "0.6553918", "0.6532762", "0.6192238", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833", "0.6190833" ]
0.7026672
0
Sets the timeout_seconds of this BigqueryConnection.
def timeout_seconds(self, timeout_seconds):
    self._timeout_seconds = timeout_seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_timeout(self, seconds):\n self._timeout = seconds", "def set_timeout(self, timeout_secs):\n self._timeout_secs = timeout_secs", "def set_timeout(self, timeout: float) -> None:\n self._timeout = timeout", "def set_timeout(timeout):\n\n if timeout is None:\n raise TypeError(\"timeout is null!\")\n\n AceQLHttpApi.set_timeout(timeout)", "def set_timeout(self, timeout):\n pass", "def set_timeouts(self, timeouts):\n self._timeouts = timeouts", "def set_timeout(self, timeout):\n self.timeout = timeout", "def timeout(self, timeout):\n\n self._timeout = timeout", "def set_timeout(self, timeout):\n if self.interface is not None:\n self.interface.timeout = timeout", "def timeout(self, timeout):\n if (self.local_vars_configuration.client_side_validation and\n timeout is not None and not isinstance(timeout, int)):\n raise ValueError(\"Parameter `timeout` must be an integer\") # noqa: E501\n\n self._timeout = timeout", "def set_timeout(self, new_timeout):\n self.timeout = new_timeout\n self._update_timestamp()", "def timeout(self, timeout):\n assert timeout is None or timeout > 0\n self._timeout = timeout", "def setTimeOut(self, timeout=6.0):\n self.timeout = timeout", "def settimeout(self, to):\r\n self._timeout = to", "def set_timeout(self, connect_timeout, read_timeout, search_timeout = None):\n self.timeout = urllib3.util.timeout.Timeout(connect = connect_timeout, read = read_timeout)\n if (search_timeout != None):\n self.search_timeout = urllib3.util.timeout.Timeout(connect = connect_timeout, read = search_timeout)", "def set_wait_timeout(self, timeout):\n self.__wait_timeout = timeout", "def setConnectTimeout(self, timeoutMilliSeconds):\n\n _ExceptionUtil.raiseOnError(\n internals.blpapi_SessionOptions_setConnectTimeout(\n self.__handle,\n timeoutMilliSeconds))", "def set_timeout():\n try:\n seconds = int(request.get_json()['timeout_seconds'])\n except (KeyError, TypeError):\n return abort(400)\n anova_controller.set_timeout(seconds)\n output = {\"timeout_seconds\": seconds, }\n return jsonify(output)", "def timeout(self, seconds: Optional[float]):\n if (seconds is not None) and (seconds < 0):\n raise ValueError(\"negative\")\n\n self._timeout = seconds", "def _set_queue_timeout(self, queue_timeout):\n if (\n isinstance(queue_timeout, bool)\n or not isinstance(queue_timeout, int)\n or not queue_timeout > -1\n ):\n raise AttributeError(\n \"Connection queue_timeout value must be an integer greater or \"\n f\"equal to 0, the given value {queue_timeout} is not valid\"\n )\n\n self.queue_timeout = queue_timeout\n self.settings[\"queue_timeout\"] = (\n _CNX_POOL_QUEUE_TIMEOUT if queue_timeout == 0 else int(queue_timeout / 1000)\n )\n # To avoid a connection stall waiting for the server, if the\n # connect-timeout is not given, use the queue_timeout\n if \"connect-timeout\" not in self.settings:\n self.settings[\"connect-timeout\"] = self.queue_timeout", "def timeout(self, value):\n if isinstance(value, timedelta):\n value = value.days * 3600 * 24 + value.seconds\n self._timeout = value # noqa", "def set_timeout(self, timeout: int) -> None:\n raise WatchdogError(\"Setting timeout is not supported on {0}\".format(self.describe()))", "def set_timeout(self, timeout):\n if self._timeout != timeout:\n self._timeout = timeout\n if self._zerorpc:\n self.close()\n self.connect()", "def set_retry_timeout(self, retry_timeout):", "def setTimeout(self, timeout):\n self.timeout = timeout", "def settimeout(self, timeout):\r\n self.sock.settimeout(timeout)", "def settimeout(self, timeout):\n assert timeout 
> 0.0\n self.__timeout = timeout\n self.sock.settimeout(timeout)\n # We don't query the socket's timeout or check that they're still\n # correct. Since self.sock e is public this could be the wrong\n # timeout!", "def set_request_timeout(self, request_timeout: int) -> None:\n self.__http_client.request_timeout = request_timeout", "def settimeout(self, value: int) -> None:\n ...", "def settimeout(self,timeout=10):\r\n # Update\r\n self.timeout = timeout" ]
[ "0.7122399", "0.69995874", "0.68101287", "0.67899984", "0.66791314", "0.66160035", "0.65482914", "0.63968164", "0.6396411", "0.63825697", "0.6294084", "0.6264204", "0.62599796", "0.6172657", "0.6167423", "0.6155155", "0.6151825", "0.61297095", "0.6107146", "0.61068237", "0.61066073", "0.6095281", "0.60503465", "0.60171574", "0.6011589", "0.5979414", "0.597705", "0.5941396", "0.59258044", "0.59109074" ]
0.76349115
1
Sets the private_key_id of this BigqueryConnection.
def private_key_id(self, private_key_id):
    self._private_key_id = private_key_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def private_key(self, private_key):\n\n self._private_key = private_key", "def set_key_id(self, key_id=''):\n self.key_id = key_id", "def key_id(self, key_id):\n\n self._key_id = key_id", "def vscr_ratchet_group_session_set_private_key(self, ctx, my_private_key):\n vscr_ratchet_group_session_set_private_key = self._lib.vscr_ratchet_group_session_set_private_key\n vscr_ratchet_group_session_set_private_key.argtypes = [POINTER(vscr_ratchet_group_session_t), vsc_data_t]\n vscr_ratchet_group_session_set_private_key.restype = c_int\n return vscr_ratchet_group_session_set_private_key(ctx, my_private_key)", "def __init__(self, private_key):\n self._sk = ed25519.Ed25519PrivateKey.from_private_bytes(private_key.bytes)", "def setPrivate(self, private):\n\n self.private = private", "def private_ip(self, private_ip):\n self._private_ip = private_ip", "def private(self, private):\n\n self._private = private", "def private(self, private):\n\n self._private = private", "def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None", "def vpc_id(self, vpc_id):\n self._vpc_id = vpc_id", "def public_key(self, public_key):\n\n self._public_key = public_key", "def private(self, private: bool):\n\n self._private = private", "def public_key(self, public_key):\n self._public_key = public_key\n return self", "def private_comment(self, private_comment):\n\n self._private_comment = private_comment", "def __init__(__self__, *,\n id: str,\n private_ip_address: Optional[str] = None):\n pulumi.set(__self__, \"id\", id)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)", "def connection_id(self, connection_id: PublicId) -> None:\n if self._connection_id is not None:\n raise ValueError(\"connection_id already set!\") # pragma: nocover\n self._connection_id = connection_id", "def public_from_private(self, private_key):", "def set_google_id(self, google_id):\n self._google_id = google_id", "def test_set_private_key_setter(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def private_key(self):", "def set_key(self, key):\n self.key = key", "def server_id(self, server_id):\n\n self._server_id = server_id", "def server_id(self, server_id):\n\n self._server_id = server_id", "def set_project_id(self, project_id):\n self._project_id = project_id", "def export_private_key(self, private_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_export_private_key(self.ctx, private_key.c_impl, error)\n VscfStatus.handle_status(error.status)\n instance = RawPrivateKey.take_c_ctx(result)\n return instance", "def is_private(self, is_private):\n\n self._is_private = is_private", "def project_id(self, project_id):\n\n self._project_id = project_id", "def project_id(self, project_id):\n\n self._project_id = project_id", "def set_AWSSecretKeyId(self, value):\n super(PutBucketWebsiteRedirectInputSet, self)._set_input('AWSSecretKeyId', value)" ]
[ "0.7320087", "0.6217251", "0.60491544", "0.5960115", "0.57581747", "0.5692094", "0.5617272", "0.5451287", "0.5451287", "0.54292107", "0.52818465", "0.522966", "0.522202", "0.5207448", "0.51840496", "0.5178242", "0.5171025", "0.5107503", "0.5091602", "0.5078473", "0.5045052", "0.503584", "0.5025461", "0.5025461", "0.5023709", "0.49968863", "0.49638265", "0.49614197", "0.49614197", "0.49266714" ]
0.78219444
0
Sets the private_key of this BigqueryConnection.
def private_key(self, private_key):
    self._private_key = private_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def private_key_id(self, private_key_id):\n\n self._private_key_id = private_key_id", "def __init__(self, private_key):\n self._sk = ed25519.Ed25519PrivateKey.from_private_bytes(private_key.bytes)", "def setPrivate(self, private):\n\n self.private = private", "def private(self, private):\n\n self._private = private", "def private(self, private):\n\n self._private = private", "def vscr_ratchet_group_session_set_private_key(self, ctx, my_private_key):\n vscr_ratchet_group_session_set_private_key = self._lib.vscr_ratchet_group_session_set_private_key\n vscr_ratchet_group_session_set_private_key.argtypes = [POINTER(vscr_ratchet_group_session_t), vsc_data_t]\n vscr_ratchet_group_session_set_private_key.restype = c_int\n return vscr_ratchet_group_session_set_private_key(ctx, my_private_key)", "def __init__(self, private_key):\n if private_key:\n if isinstance(private_key, str): # base58 encoded string\n self.private_key = PrivateKey.from_b58check(private_key)\n else:\n self.private_key = private_key\n self.public_key = self.private_key.public_key\n else:\n self.private_key = None\n self.public_key = None", "def private_ip(self, private_ip):\n self._private_ip = private_ip", "def private(self, private: bool):\n\n self._private = private", "def public_key(self, public_key):\n\n self._public_key = public_key", "def public_key(self, public_key):\n self._public_key = public_key\n return self", "def key_id(self, key_id):\n\n self._key_id = key_id", "def set_key_id(self, key_id=''):\n self.key_id = key_id", "def private_comment(self, private_comment):\n\n self._private_comment = private_comment", "def test_set_private_key_setter(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def public_from_private(self, private_key):", "def set_key(self, key):\n self.key = key", "def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()", "def private_key(self):", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def export_private_key(self, private_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_export_private_key(self.ctx, private_key.c_impl, error)\n VscfStatus.handle_status(error.status)\n instance = RawPrivateKey.take_c_ctx(result)\n return instance", "def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])", "def 
is_private(self, is_private):\n\n self._is_private = is_private", "def __init__(self):\n self.public_key = None\n self._private_key = None", "def partition_key(self, partition_key):\n\n self._partition_key = partition_key", "def api_key(self, value):\n self.__creds.api_key_v2 = value", "def add(self, private_key):\n if not isinstance(private_key, PaillierPrivateKey):\n raise TypeError(\"private_key should be of type PaillierPrivateKey, \"\n \"not %s\" % type(private_key))\n self.__keyring[private_key.public_key] = private_key", "def key(self, key):\n\n self._key = key", "def key(self, key):\n\n self._key = key", "def SetAPIKey(self, api_key):\n self._api_key = api_key" ]
[ "0.7102385", "0.6089998", "0.6048516", "0.5918505", "0.5918505", "0.59148234", "0.58247757", "0.5821729", "0.5623071", "0.561844", "0.55904317", "0.54883444", "0.5413316", "0.5371647", "0.5363073", "0.5352474", "0.53235835", "0.52944875", "0.5232665", "0.5206534", "0.5205057", "0.52022016", "0.51805705", "0.5170972", "0.5146949", "0.5116991", "0.5064627", "0.5063093", "0.5063093", "0.5042946" ]
0.7588765
0
Sets the client_email of this BigqueryConnection.
def client_email(self, client_email):
    self._client_email = client_email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email", "def setEmail(self, email):\n self.email = email\n return self", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def email(self, email):\n\n self._email = email", "def customer_email(self, customer_email):\n self._customer_email = customer_email", "def email(self, email: str):\n\n self._email = email", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 64):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `64`\") # noqa: E501\n\n self._email = email", "def setCooperationClient(self, client):\n self.__cooperationClient = client", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def email_address(self, email_address):\n\n self._email_address = email_address", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def contact_email(self, contact_email):\n\n self._contact_email = contact_email", "def admin_email(self, admin_email):\n\n self._admin_email = admin_email", "def client_addresses(self, client_addresses):\n\n self._client_addresses = client_addresses", "def from_email(self, from_email):\n\n self._from_email = from_email", "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id", "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id" ]
[ "0.630568", "0.6241642", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6155655", "0.6093457", "0.60278374", "0.6026907", "0.6026907", "0.6026907", "0.6026907", "0.58056325", "0.57187164", "0.5657896", "0.5657896", "0.5657896", "0.5608099", "0.5608099", "0.56077725", "0.5501318", "0.54601806", "0.54280454", "0.54280454" ]
0.805182
0
Sets the client_id of this BigqueryConnection.
def client_id(self, client_id):
    self._client_id = client_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id", "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id", "def setCooperationClient(self, client):\n self.__cooperationClient = client", "def set_credentials(self, client_id=None, client_secret=None):\n self._client_id = client_id\n self._client_secret = client_secret\n\n # make sure to reset session due to credential change\n self._session = None", "def client_id(self):\n\n return self.__client_id", "def auth_token_provider_client_id(self, auth_token_provider_client_id):\n\n self._auth_token_provider_client_id = auth_token_provider_client_id", "def client_id(self):\n return self.__client_id", "def client_order_id(self, client_order_id):\n\n self._client_order_id = client_order_id", "def set(self, client):\n if not client:\n raise SurvoxAPIMissingParameter('client')\n c = self.get()\n if not c:\n raise SurvoxAPIRuntime('No client available named: {name}'.format(name=self.name))\n return self.api_put(endpoint=self.url, data=client)", "def client_name(self, client_name):\n if client_name is None:\n raise ValueError(\"Invalid value for `client_name`, must not be `None`\") # noqa: E501\n\n self._client_name = client_name", "def client_certificate_id(self, client_certificate_id):\n\n self._client_certificate_id = client_certificate_id", "def __init__(self, client_id: str):\n\n self._cs = aiohttp.ClientSession(\n loop=asyncio.get_event_loop(),\n raise_for_status=True,\n headers={\"Client-ID\": client_id},\n )", "def init_client(self, client):\n self.client = client", "def set_client_id(self):\n data = self.receive() # deserialized data\n client_id = data['clientid'] # extracts client id from data\n self.client_id = client_id # sets the client id to this client\n print(\"Successfully connected to server: \" + self.userInfo['host'] + \" / \" + str(self.userInfo['port']))\n print(\"Your client info is:\\n\" + \"Client Name: \" + self.userInfo['name'] + \"\\nClient ID: \" + str(client_id))", "def client_master_plan_id(self, client_master_plan_id):\n\n self._client_master_plan_id = client_master_plan_id", "def set_project_id(self, project_id):\n self._project_id = project_id", "def set_client_id(self, client=None, client_id=None):\n # cancel if its not a client object\n is_client = type(client) is Client\n\n if not is_client:\n return False\n\n # check id\n id_exists = client_id in [c.client_id for c in self.client_list]\n id_is_empty = client_id == ''\n id_is_own = client.client_id == client_id\n\n # cancel with true, if there's no need to change the ID\n if id_is_own:\n return True\n\n # cancel if it's no client or the client_id does already exist or is empty\n if id_exists or id_is_empty:\n return False\n\n # change every client_id of the projects of the original client\n for p in self.get_client_projects(client=client):\n # get old and new project\n old_p = p.copy()\n new_p = p.copy()\n new_p.client_id = client_id\n\n # set new projects client_id\n self.set_project_id(\n old_project=old_p,\n new_project=new_p\n )\n\n # rename the file\n self.rename_client_file(\n old_client_id=client.client_id,\n new_client_id=client_id\n )\n\n # get index\n index = self.get_client_index(client)\n\n # change the client_id of the original client to the new id\n self.client_list[index].client_id = client_id\n\n # get new 
client and save it\n self.save_client_to_file(client=self.client_list[index])\n\n return True", "def related_client_id(self, related_client_id):\n\n self._related_client_id = related_client_id", "def connection_id(self, connection_id: PublicId) -> None:\n if self._connection_id is not None:\n raise ValueError(\"connection_id already set!\") # pragma: nocover\n self._connection_id = connection_id", "def set_google_id(self, google_id):\n self._google_id = google_id", "def request_client_id(self) -> None:\n GCR.log.log(Logger.INFORMATION, \"Demande d'un id client\")\n self.send({\"action\": \"request_id\", \"username\": self.username})", "def client_id(self) -> str:\n return self.get_env_var(self.client_id_var)", "def client_id(self) -> str:\n return self.get_env_var(self.client_id_var)", "def project_id(self, project_id):\n\n self._project_id = project_id", "def project_id(self, project_id):\n\n self._project_id = project_id", "def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")", "def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")", "def client_id(self) -> str:\n return pulumi.get(self, \"client_id\")", "def client_master_plan_instance_id(self, client_master_plan_instance_id):\n\n self._client_master_plan_instance_id = client_master_plan_instance_id", "def tenant_id(self, tenant_id):\n\n self._tenant_id = tenant_id" ]
[ "0.7170458", "0.7170458", "0.6344065", "0.61503285", "0.6066484", "0.6047273", "0.6016593", "0.599482", "0.597772", "0.5858523", "0.5840003", "0.58148485", "0.57910585", "0.57744676", "0.5738312", "0.56799054", "0.56576747", "0.5600492", "0.55908877", "0.5560438", "0.551247", "0.5509364", "0.5509364", "0.5464484", "0.5464484", "0.5443789", "0.5443789", "0.5443789", "0.5426237", "0.53519493" ]
0.77427274
0
Sets the auth_uri of this BigqueryConnection.
def auth_uri(self, auth_uri):
    self._auth_uri = auth_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_uri(self, uri):\n self.__uri = uri", "def __init__(self, auth, base_url=ANACODE_API_URL):\n self.auth = auth\n self.base_url = base_url", "def set_uri(self, uri):\r\n self.uri = uri", "def authentication_endpoint(self, authentication_endpoint):\n\n self._authentication_endpoint = authentication_endpoint", "def setAuthenticationOptions(self, authOptions):\n internals.blpapi_SessionOptions_setAuthenticationOptions(\n self.__handle,\n authOptions)", "def __init__(self, api_version=_BIGQUERY_API_VERSION):\n self.http = httplib2.Http(cache=memcache)\n self.service = discovery.build('bigquery',\n api_version,\n http=self.http,\n discoveryServiceUrl=DISCOVERY_URL)\n if _CREDENTIALS is None:\n raise BigQueryClientException(\n 'Needed Credentials are missing from this source code!')\n credentials = Credentials.new_from_json(_CREDENTIALS)\n logging.info('Authorizing...')\n self.http = credentials.authorize(self.http)", "def set_credentials(self, *args, **kwargs):\n pass", "def auth_protocol(self, auth_protocol):\n\n self._auth_protocol = auth_protocol", "def uri(self, uri):\n\n self._uri = uri", "def uri(self, uri):\n\n self._uri = uri", "def uri(self, uri):\n self._uri = uri", "def uri(self, uri):\n self._uri = uri", "def set_basic_auth(self, host, username, password):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def auth_password(self, auth_password):\n\n self._auth_password = auth_password", "def set_credentials(self, client_id=None, client_secret=None):\n self._client_id = client_id\n self._client_secret = client_secret\n\n # make sure to reset session due to credential change\n self._session = None", "def base_url(self, base_url):\n\n self._base_url = base_url", "def base_url(self, base_url):\n\n self._base_url = base_url", "def auth_url(self):\n\n return \"{}?client_id={}&redirect_uri={}&scope={}&state={}\".format(AUTH_ENDPOINT, self.client_id,\\\n self.redirect_uri, self.scope, self.state)", "def auth_token_provider_endpoint(self, auth_token_provider_endpoint):\n\n self._auth_token_provider_endpoint = auth_token_provider_endpoint", "def credentials(self, credentials):\n\n self._credentials = credentials", "def set_requests_auth(self):\n self.__auth = OAuth2(token=self.bearer_token)", "def set_uri(self, uri):\n # Parse URI\n parsed_uri = urllib.parse.urlparse(uri)\n # Separate out the user ID for HydroShare users\n contributor_pk = os.path.basename(parsed_uri.path.strip('/'))\n # Make sure this is a HydroShare user URI\n is_hs_user_uri = False\n try:\n validate_hydroshare_user_id(contributor_pk)\n is_hs_user_uri = True\n except ValidationError:\n pass\n\n if is_hs_user_uri:\n # Set rel_uri\n self.rel_uri = parsed_uri.path\n pk = None\n try:\n pk = int(contributor_pk)\n except ValueError:\n msg = \"User ID {0} is not an integer. 
User URI was {1}.\"\n raise GenericResourceMeta.ResourceMetaException(msg)\n\n assert (pk is not None)\n self.id = pk\n\n self.uri = uri", "def set_credentials(self, ipv4, user, passwd):\n self.ip = ipv4\n self.username = user\n self.password = passwd", "def _Connect(self):\n return bq.connect(self.api_endpoint_,\n self.auth_policy_.GetToken(),\n self.transport_)", "def init(self, auth_dict=None):\n self.auth_dict = auth_dict", "def set_connection(cls, user_name, password, end_point, session_verify):\n if not session_verify:\n requests.packages.urllib3.disable_warnings()\n\n cls.user_name = user_name\n cls.password = password\n cls.end_point = end_point\n\n cls.session = requests.Session()\n cls.session.auth = HTTPBasicAuth(user_name, password)\n cls.session.verify = session_verify", "def set_credentials(self, authenticator):\n pass", "def auth_username(self, auth_username):\n\n self._auth_username = auth_username", "def _set_tracker_uri(self, uri):\r\n parse_result = urlparse.urlparse(uri)\r\n if (parse_result.scheme.lower() not in ['http', 'https'] or\r\n not parse_result.netloc or not parse_result.query):\r\n raise InvalidUriError('Invalid tracker URI (%s)' % uri)\r\n qdict = cgi.parse_qs(parse_result.query)\r\n if not qdict or not 'upload_id' in qdict:\r\n raise InvalidUriError('Invalid tracker URI (%s)' % uri)\r\n self.tracker_uri = uri\r\n self.tracker_uri_host = parse_result.netloc\r\n self.tracker_uri_path = '%s/?%s' % (parse_result.netloc,\r\n parse_result.query)\r\n self.server_has_bytes = 0", "def change_authentication(self, client_id=None, client_secret=None,\n access_token=None, refresh_token=None):\n # TODO: Add error checking so you cannot change client_id and retain\n # access_token. Because that doesn't make sense.\n self.client_id = client_id or self.client_id\n self.client_secret = client_secret or self.client_secret\n self.access_token = access_token or self.access_token\n self.refresh_token = refresh_token or self.refresh_token" ]
[ "0.54572105", "0.53156954", "0.5259753", "0.5188325", "0.5149119", "0.5088627", "0.5087125", "0.5057519", "0.5033262", "0.5033262", "0.50223464", "0.50223464", "0.4960993", "0.49558327", "0.4926186", "0.49088418", "0.49088418", "0.489337", "0.48925218", "0.48908177", "0.48800477", "0.48611498", "0.4857947", "0.48470616", "0.4846188", "0.48368818", "0.48221958", "0.48108533", "0.48075137", "0.48062694" ]
0.73561597
0
Sets the token_uri of this BigqueryConnection.
def token_uri(self, token_uri):
    self._token_uri = token_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def access_token_url(self, access_token_url):\n\n self._access_token_url = access_token_url", "def auth_token_provider_endpoint(self, auth_token_provider_endpoint):\n\n self._auth_token_provider_endpoint = auth_token_provider_endpoint", "def auth_uri(self, auth_uri):\n\n self._auth_uri = auth_uri", "def login_token(self, token):\n self.token = token # this will also set the refresh_token to None", "def set_token(self, token: AccessToken):\n self.access_token = token.access_token or \"\"\n if isinstance(token, AccessToken):\n self.refresh_token = token.refresh_token or \"\"\n self.token_type = token.token_type or \"\"\n self.expires_in = token.expires_in or 0\n\n lag = datetime.timedelta(seconds=-self.lag_time)\n if token.access_token and token.expires_in:\n lag = datetime.timedelta(seconds=token.expires_in - self.lag_time)\n self.expires_at = datetime.datetime.now() + lag", "def set_token(self, token):\n # type: (Token) -> None\n self.token = token\n self._token_header = \"Bearer \" + token[\"access_token\"]", "def __init__(self, token_path):\n self._accessToken = None\n self._tokenPath = token_path", "def token(self, token):\n\n self._token = token", "def token(self, token):\n\n self._token = token", "def with_uri(self, uri):\n if not isinstance(uri, str):\n raise TypeError('URI must be a string')\n\n self.token['uri'] = uri\n\n return self", "def token_values(self, token_values):\n\n self._token_values = token_values", "def set_access_token(self, access_token):\n self.access_token = access_token", "async def _tokenset(self, ctx: commands.Context, token: str):\n self.config[ctx.message.server.id] = token\n dataIO.save_json('data/football/config.json', self.config)\n await self.bot.say('football-data API token set')", "def device_token(self, device_token):\n \n self._device_token = device_token", "def token_refresh(self, token_refresh):\n\n self._token_refresh = token_refresh", "def request_token_url(self): # pragma: no cover\n raise NotImplementedError()", "def token_id_from(self, token_id_from):\n\n self._token_id_from = token_id_from", "def set_uri(self, uri):\n self.__uri = uri", "def set_oauth(self, consumer_token, access_token):\n self.consumer_token = consumer_token\n self.access_token = access_token", "def reset_token(self, reset_token):\n\n self._reset_token = reset_token", "def set_maptoken(self, token):\n self._data['maptoken'] = token", "def auth_token_provider_conn_timeout(self, auth_token_provider_conn_timeout):\n\n self._auth_token_provider_conn_timeout = auth_token_provider_conn_timeout", "def access_token(self, access_token):\n\n self._access_token = access_token", "def set_refresh_token(self, token):\n\n self.__current_request_mock.headers['Cookie'] = f'Refresh-Auth={token}'", "def set_uri(self, uri):\r\n self.uri = uri", "def set_access_token(self, token):\n\n self.__current_request_mock.headers['Authorization'] = token", "def auth_token_provider_client_id(self, auth_token_provider_client_id):\n\n self._auth_token_provider_client_id = auth_token_provider_client_id", "def auth_token_provider_scope(self, auth_token_provider_scope):\n\n self._auth_token_provider_scope = auth_token_provider_scope", "def token_id_to(self, token_id_to):\n\n self._token_id_to = token_id_to", "def api_token(self, api_token):\n\n self._api_token = api_token" ]
[ "0.60428053", "0.57762086", "0.56720537", "0.5606755", "0.5600332", "0.55200356", "0.5468206", "0.54573166", "0.54573166", "0.5306066", "0.5302382", "0.5284382", "0.52734005", "0.52444065", "0.5219296", "0.51599866", "0.5157792", "0.5157434", "0.5098574", "0.5070664", "0.5063862", "0.50614136", "0.50597924", "0.5018787", "0.5010909", "0.49988726", "0.49893218", "0.49734983", "0.49190697", "0.4900543" ]
0.73389703
0
Sets the auth_provider_x509_cert_url of this BigqueryConnection.
def auth_provider_x509_cert_url(self, auth_provider_x509_cert_url):
    self._auth_provider_x509_cert_url = auth_provider_x509_cert_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_x509_cert_url(self, client_x509_cert_url):\n\n self._client_x509_cert_url = client_x509_cert_url", "def set_ssl_context(self, ssl_verify, ssl_cafile):\n if not ssl_verify:\n self.ssl_context = ssl.create_default_context()\n self.ssl_context.check_hostname = False\n self.ssl_context.verify_mode = ssl.CERT_NONE\n elif ssl_cafile:\n self.ssl_context = ssl.create_default_context(cafile=ssl_cafile)\n else:\n self.ssl_context = ssl.create_default_context()", "def auth_uri(self, auth_uri):\n\n self._auth_uri = auth_uri", "def ca_cert_path(self, ca_cert_path: str):\n\n self._ca_cert_path = ca_cert_path", "def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE", "def cdn_provider(self, cdn_provider):\n # type: (string_types) -> None\n\n if cdn_provider is not None:\n if not isinstance(cdn_provider, string_types):\n raise TypeError(\"Invalid type for `cdn_provider`, type has to be `string_types`\")\n\n self._cdn_provider = cdn_provider", "def tls_config(self, tls_config):\n\n self._tls_config = tls_config", "def cert(self, value):\n self._cert = value", "def auth_token_provider_conn_timeout(self, auth_token_provider_conn_timeout):\n\n self._auth_token_provider_conn_timeout = auth_token_provider_conn_timeout", "def response_kafka_connection_url(self, response_kafka_connection_url: str):\n\n self._response_kafka_connection_url = response_kafka_connection_url", "def auth_token_provider_endpoint(self, auth_token_provider_endpoint):\n\n self._auth_token_provider_endpoint = auth_token_provider_endpoint", "def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert", "def set_ssl(self):\n for params in self.config.get_ssl_params():\n self.connection.transport.set_ssl(**params)", "def configure(self, source_url, hostcert, hostkey, ca_path):\n try:\n self.__url, self.__alias = source_url.split(\"#\")\n except ValueError:\n return \"Failed to parse URL#alias notation\"\n self.__hostcert = hostcert\n self.__hostkey = hostkey\n self.__ca_path = ca_path", "def auth_token_provider_relaxed_ssl(self, auth_token_provider_relaxed_ssl):\n\n self._auth_token_provider_relaxed_ssl = auth_token_provider_relaxed_ssl", "def __init__(self, url, **kwargs):\n self.hostname = self.getHostnameFromURL(url)\n\n # ``verify`` here refers to server-side verification of certificates\n # presented by a client:\n self.verify = False if self.isClient else True\n super(SSLVerifyingContextFactory, self).__init__(verify=self.verify,\n fixBrokenPeers=True,\n **kwargs)", "def _hinit(self, c):\n c.setopt(pycurl.SSL_VERIFYPEER, 0) # FIXME\n c.setopt(pycurl.SSL_VERIFYHOST, 1)", "def console_ca_cert(self, console_ca_cert):\n\n self._console_ca_cert = console_ca_cert", "def set_connection(cls, user_name, password, end_point, session_verify):\n if not session_verify:\n requests.packages.urllib3.disable_warnings()\n\n cls.user_name = user_name\n cls.password = password\n cls.end_point = end_point\n\n cls.session = requests.Session()\n cls.session.auth = HTTPBasicAuth(user_name, password)\n cls.session.verify = session_verify", "def auth_protocol(self, auth_protocol):\n\n self._auth_protocol = auth_protocol", "def auth_token_provider_client_id(self, auth_token_provider_client_id):\n\n 
self._auth_token_provider_client_id = auth_token_provider_client_id", "def client_cert(self, client_cert):\n\n self._client_cert = client_cert", "def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)", "def set_ssl_addr(self, addr):\n Server.t_ssl_addresses[threading.get_ident()] = addr", "def _maybeSetDefaultAuthDomain(self):\n auth_domain = os.environ.get(\"AUTH_DOMAIN\")\n if not auth_domain:\n os.environ['AUTH_DOMAIN'] = \"appscale.com\"", "def connect(self):\n if self._ignore_cert:\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)\n if self._ignore_referrals:\n ldap.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)\n LOG.debug(\"LDAP connecting to %s\", self._url)\n self._server = ldap.initialize(self._url, bytes_mode=False)\n self._bind_to_service()", "def svn_client_get_ssl_client_cert_file_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def signing_cert_url(self) -> str:\n return self[\"Sns\"][\"SigningCertUrl\"]", "def set_datajoint_config(jwt_payload: dict):\n dj.config['database.host'] = jwt_payload['databaseAddress']\n dj.config['database.user'] = jwt_payload['username']\n dj.config['database.password'] = jwt_payload['password']\n\n dj.conn(reset=True)", "def __init__(self, enterprise_cert_file_path):\n self._enterprise_cert_file_path = enterprise_cert_file_path\n self._cert = None\n self._sign_callback = None" ]
[ "0.6002955", "0.52392286", "0.5176855", "0.49263972", "0.4875974", "0.48706037", "0.48231563", "0.47724578", "0.47715896", "0.47594145", "0.4745421", "0.47402442", "0.47371715", "0.47316852", "0.47306815", "0.47133183", "0.469183", "0.4654107", "0.46396813", "0.45899627", "0.4551938", "0.45503026", "0.4529833", "0.45003414", "0.44829997", "0.4471187", "0.4464255", "0.44640002", "0.4461505", "0.4381069" ]
0.8237836
0
Sets the client_x509_cert_url of this BigqueryConnection.
def client_x509_cert_url(self, client_x509_cert_url):
    self._client_x509_cert_url = client_x509_cert_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_cert(self, client_cert):\n\n self._client_cert = client_cert", "def auth_provider_x509_cert_url(self, auth_provider_x509_cert_url):\n\n self._auth_provider_x509_cert_url = auth_provider_x509_cert_url", "def org_apache_felix_https_clientcertificate(self, org_apache_felix_https_clientcertificate: ConfigNodePropertyDropDown):\n\n self._org_apache_felix_https_clientcertificate = org_apache_felix_https_clientcertificate", "def client_certificate_id(self, client_certificate_id):\n\n self._client_certificate_id = client_certificate_id", "def ca_cert_path(self, ca_cert_path: str):\n\n self._ca_cert_path = ca_cert_path", "def org_apache_felix_https_clientcertificate(self) -> ConfigNodePropertyDropDown:\n return self._org_apache_felix_https_clientcertificate", "def ca_cert(self, ca_cert):\n\n self._ca_cert = ca_cert", "def setCooperationClient(self, client):\n self.__cooperationClient = client", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def client_id(self, client_id):\n\n self._client_id = client_id", "def cert(self, value):\n self._cert = value", "def client_certificate(self) -> str:\n return pulumi.get(self, \"client_certificate\")", "def client_certificate(self) -> str:\n return pulumi.get(self, \"client_certificate\")", "def client_addresses(self, client_addresses):\n\n self._client_addresses = client_addresses", "def console_ca_cert(self, console_ca_cert):\n\n self._console_ca_cert = console_ca_cert", "def set_ssl_context(self, ssl_verify, ssl_cafile):\n if not ssl_verify:\n self.ssl_context = ssl.create_default_context()\n self.ssl_context.check_hostname = False\n self.ssl_context.verify_mode = ssl.CERT_NONE\n elif ssl_cafile:\n self.ssl_context = ssl.create_default_context(cafile=ssl_cafile)\n else:\n self.ssl_context = ssl.create_default_context()", "def response_kafka_connection_url(self, response_kafka_connection_url: str):\n\n self._response_kafka_connection_url = response_kafka_connection_url", "def client_certificate_config(self) -> Optional[pulumi.Input['ClientCertificateConfigArgs']]:\n return pulumi.get(self, \"client_certificate_config\")", "def __init__(__self__, *,\n issue_client_certificate: Optional[pulumi.Input[bool]] = None):\n if issue_client_certificate is not None:\n pulumi.set(__self__, \"issue_client_certificate\", issue_client_certificate)", "def __init__(self, url, **kwargs):\n self.hostname = self.getHostnameFromURL(url)\n\n # ``verify`` here refers to server-side verification of certificates\n # presented by a client:\n self.verify = False if self.isClient else True\n super(SSLVerifyingContextFactory, self).__init__(verify=self.verify,\n fixBrokenPeers=True,\n **kwargs)", "def client_email(self, client_email):\n\n self._client_email = client_email", "def ca_cert_path(self) -> str:\n return self._ca_cert_path", "def __init__(__self__, *,\n client_certificate_config: Optional[pulumi.Input['ClientCertificateConfigArgs']] = None,\n cluster_ca_certificate: Optional[pulumi.Input[str]] = None,\n password: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if client_certificate_config is not None:\n pulumi.set(__self__, \"client_certificate_config\", client_certificate_config)\n if cluster_ca_certificate is not None:\n pulumi.set(__self__, \"cluster_ca_certificate\", cluster_ca_certificate)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if 
username is not None:\n pulumi.set(__self__, \"username\", username)", "def set_proxy(self, proxy_url: str):\n if self._is_closed:\n raise KustoClosedError()", "def ssl(self, cainfo=None, verify=True, cert=None, key=None):\n if cainfo:\n self.curl.setopt(pycurl.CAINFO, cainfo)\n\n if verify == False:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n else:\n self.curl.setopt(pycurl.SSL_VERIFYPEER, 1)\n self.curl.setopt(pycurl.SSL_VERIFYHOST, 2)\n if cert:\n #self.curl.setopt(pycurl.SSLCERTTYPE, \"DER\")\n self.curl.setopt(pycurl.SSLCERT, cert)\n if key:\n self.curl.setopt(pycurl.SSLKEY, key)", "def configure(self, source_url, hostcert, hostkey, ca_path):\n try:\n self.__url, self.__alias = source_url.split(\"#\")\n except ValueError:\n return \"Failed to parse URL#alias notation\"\n self.__hostcert = hostcert\n self.__hostkey = hostkey\n self.__ca_path = ca_path", "def set_service_host(self, host):\n self._api_host = f\"https://{host}\"", "def client_id(self, client_id):\n if client_id is None:\n raise ValueError(\"Invalid value for `client_id`, must not be `None`\") # noqa: E501\n\n self._client_id = client_id" ]
[ "0.67225325", "0.63417757", "0.6191684", "0.6082272", "0.5772625", "0.5432383", "0.53903365", "0.52588344", "0.5203637", "0.5203637", "0.5203637", "0.5203637", "0.51629514", "0.51223", "0.51223", "0.5081893", "0.5026269", "0.501235", "0.50076276", "0.4981881", "0.49137843", "0.4880819", "0.4851608", "0.4816158", "0.48011276", "0.47692806", "0.47539648", "0.4750382", "0.47355008", "0.473416" ]
0.8200095
0
Select all Mip objects & print.
def select_all_mip_objects():

    for mip in Mip.objects.all():
        print mip
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printall():\n print listAll()", "def uglyprint(self):\n\n ctmp = self.conn.cursor()\n ctmp.execute(\"SELECT * FROM ATOM\")\n print(ctmp.fetchall())", "def printAllPion(self):\n idx = 0\n for pion in self.arrayPion:\n print(\"ID = \", idx, end=\" --> \")\n pion.printPion()\n idx += 1", "def PrintFilterCollection(self):\n self._filter_collection.Print(self._output_writer)", "def print_entries(self):\n self.print_selected_entries(self.entries)", "def getItems(self):\n for object in self.database:\n print(object)", "def print_all(self):\r\n for e in self.channels:\r\n e.print()", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Physical Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-MAC address:\", self.MAC_address, sep='')", "def all():\n session = session_maker(\n app.config['MYSQL_USER'], app.config['MYSQL_PASS'], app.config['MYSQL_SERVER_PORT_3306_TCP_ADDR'],\n app.config['MYSQL_SERVER_PORT_3306_TCP_PORT'], app.config['DB'])\n\n print(\n tabulate(\n selection_list_all(session),\n headers=['number', 'sqlid', 'name', 'city', 'state']))", "def print_elements(self):\n for element in self.elements:\n element.print_element()", "def print_PQ(q):\n for item in q:\n print(str(item), end=' ')\n print()", "def print_list(self):\r\n pass", "def dumpo(self):\n return self.do_all()", "def ShowAllIPC(cmd_args=None):\n for t in kern.tasks:\n print GetTaskSummary.header + \" \" + GetProcSummary.header\n pval = Cast(t.bsd_info, 'proc *')\n print GetTaskSummary(t) + \" \" + GetProcSummary(pval)\n print PrintIPCInformation.header\n PrintIPCInformation(t.itk_space, False, False) + \"\\n\\n\"", "def _print_findings(self) -> None:\n for ip_address in self._ip_addresses:\n print(f\"{ip_address}\")", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Cloud Virtual Resource ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')\n print(indent, \"|-IP address:\", self.IP_address, sep='')\n print(indent, \"|-URL:\", self.URL, sep='')\n\n if self.related_phys_rsrc_ID_list != None:\n if len(self.related_phys_rsrc_ID_list) >0:\n print(indent, \"|-related/associated physical resource(s):\", sep='')\n for phys_resource_ID in self.related_phys_rsrc_ID_list:\n phys_resource_item = get_indexed_item_from_list(phys_resource_ID, AutoResilGlobal.physical_resource_list)\n if phys_resource_item != None:\n phys_resource_item.printout_all(indent_level+1)", "def print_queue(self):\n for i in self.Obs:\n print(i)", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def print_all(jobs):\n\n if len(jobs) == 0:\n print('print_all() recieved empty input')\n return\n\n for job in jobs:\n if job.is_relevant:\n print(job)\n else:\n continue", "def print(self, file):\n for member in self.members:\n member.print(file)", "def print_results(self):\n pass", "def print_out():\n pass", "def mprint(self):\n for i in range(len(self.matrix)):\n for j in 
self.matrix[i]:\n print(j, end=\" \")\n print()\n pass", "def dump_objects(self):\n #print 'Object Count: ', self.object_store.len()\n \n for item in self.object_store:\n print 'Object Name: ', item.__dict__['Name'], ' LocalID: ', item.__dict__['LocalID']", "def Print_Items(db):\r\n \r\n for item in db.Transaction.find():\r\n print(item)", "def print_list(self):\r\n print(\"Displaying each metric:\")\r\n print(\"======\")\r\n for metric in self.metrics:\r\n metric.whoami()\r\n print(\"======\")\r\n print(self.metrics)\r\n print(\"END\")\r\n print()", "def view_all_persons():\n message = ''\n global conn\n with conn:\n rows = select_all_persons(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Person Table', message)", "def start(self):\n for circuit in self.circuits:\n self.modes[self.print_mode](circuit)", "def printout_all(self, indent_level):\n indent = \" \"*indent_level*INDENTATION_MULTIPLIER\n\n print(indent, \"Metric Definition ID:\", self.ID, sep='')\n print(indent, \"|-name:\", self.name, sep='')\n\n print(indent, \"|-info:\", self.info, sep='')" ]
[ "0.64728993", "0.57332945", "0.5684565", "0.5627472", "0.5613597", "0.55839", "0.5563493", "0.55597293", "0.55313796", "0.5501408", "0.5500758", "0.5500569", "0.5489608", "0.54356", "0.54109913", "0.5383435", "0.53405464", "0.53245205", "0.5307608", "0.53062916", "0.53043556", "0.52902895", "0.524157", "0.5221206", "0.5219876", "0.52149224", "0.5205312", "0.5201844", "0.51982427", "0.51959556" ]
0.7677657
0
Select all Mip objects located on specific reference sequence.
def select_mips_from_reference_seq(reference_name):

    for mip in Mip.objects.filter(reference_id=reference_name):
        print mip
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_all_mip_objects():\n\n for mip in Mip.objects.all():\n print mip", "def _query_sequence_sources(self):\n pass", "def selectRefs(*args):\n sel = cmds.textScrollList(widgets[\"shotAssListTSL\"], q=True, si=True)\n roots = []\n nodes = []\n if sel:\n for s in sel:\n path = pi.referenceDictionary[s]\n nodes.append(cmds.referenceQuery(path, nodes=True))\n roots = cFuncs.getTopNodes(nodes[0])\n cmds.select(cl=True)\n for x in roots:\n cmds.select(x, add=True)", "def _query_sequence_sources(self):\n if self.uniprot_id:\n self._query_uniprot()\n elif self.ncbi_id:\n self._query_ncbi()\n if \"mutations\" in self.metadata.keys():\n mutations = self.metadata[\"mutations\"].split()\n del self.metadata[\"mutations\"] # remove mutations, will be added subsequently\n for mutation in mutations:\n import re\n\n if mutation.startswith(\"ins\"): # insertion\n logger.debug(f\"Performing insertion {mutation} ...\")\n match = re.search(\"ins(?P<position>[0-9]+)(?P<insertion>[A-Z]+)\", mutation)\n self.insert(int(match.group(\"position\")), match.group(\"insertion\"))\n elif mutation.startswith(\"del\"): # deletion\n logger.debug(f\"Performing deletion {mutation} ...\")\n match = re.search(\n \"del(?P<first>[0-9]+)-(?P<last>[0-9]+)(?P<insertion>[A-Z]*)\",\n mutation,\n )\n self.delete(\n int(match.group(\"first\")),\n int(match.group(\"last\")),\n match.group(\"insertion\"),\n )\n else: # substitution\n logger.debug(f\"Performing substitution {mutation} ...\")\n self.substitute(mutation)\n if \"construct_range\" in self.metadata.keys():\n logger.debug(f\"Cropping sequence to construct {self.metadata['construct_range']} ...\")\n first, last = [int(x) for x in self.metadata[\"construct_range\"].split(\"-\")]\n self._sequence = self._sequence[first - 1 : last] # 1-indexed", "def select_mips_for_sample(sample_id):\n\n for sam in Samples.objects.filter(sample_fk_id=sample_id):\n print sam.mip_fk_id", "def select_children(self):\n objs = []\n for obj in pm.selected():\n objs.extend(obj.listRelatives(ad=True, type=[\"transform\", \"joint\"]))\n pm.select(objs, add=True)", "def targetids(obj, reftype):", "def _random_subset(self, pa_nodes, seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n # if x in pa_nodes:\n if pa_nodes.get(x, False):\n targets.add(x)\n else:\n pass\n return targets", "def test_selection():\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_answer_0)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n nrg = ncs_obj_phil.get_ncs_restraints_group_list()\n\n m1 = list(nrg[0].master_iselection)\n c1 = list(nrg[0].copies[0].iselection)\n c2 = list(nrg[0].copies[1].iselection)\n\n assert len(m1) == len(c1) # renumbering\n assert m1 == [0, 1, 2, 3, 4, 5, 6] # 0, 1, X, 3, X, 5, X | 0, 1, 3\n assert c1 == [7, 8, 9, 10, 11, 12, 13] # 7, 8, 9, X, X, 12, X | 4, 5, 7\n assert c2 == [14, 15, 16, 17, 18, 19, 20] # 14, 15, X, 17, X, 19, X | 8, 9, 11\n\n selection1 = flex.size_t([0,1,5,3,100,101])\n selection2 = flex.size_t([0,1,5,3,7,8,9,12,100,101])\n selection3 = flex.size_t([0,1,5,3,7,8,9,12,14,15,19,17,100,101])\n # gone iseqs for selection3: 2,4,6,10,11,13,16,18,20-99\n\n new_nrg = nrg.select(flex.bool(102, selection1))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n # atoms selected in both master and copies\n new_nrg = nrg.select(flex.bool(102, selection2))\n # only atoms in master are selected\n mt = 
list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n\n assert mt == []\n assert c1t == []\n\n new_nrg = nrg.select(flex.bool(102, selection3))\n # only atoms in master are selected\n mt = list(new_nrg[0].master_iselection)\n c1t = list(new_nrg[0].copies[0].iselection)\n c2t = list(new_nrg[0].copies[1].iselection)\n\n assert mt == [0, 1, 3], list(mt)\n assert c1t == [4, 5, 7], list(c1t)\n assert c2t == [8, 9, 11], list(c2t)", "def select_for_target(self, target):\n\n return [x for x in self.objects if x.target == target]", "def extract_BeambyName(self,model, obj_refs):\n seleceted_beam_names = [rs.ObjectName(name)[:-5] for name in obj_refs]\n selected_beams = []\n for name in seleceted_beam_names:\n for beam in model.beams:\n if(beam.name == name):\n selected_beam = beam \n selected_beams.append(selected_beam)\n break\n assert (selected_beam != None for selected_beam in selected_beams)\n return (selected_beams)", "def select(self, m, population):\n pass", "def selectListOfResidues(rigidbody, lst):\n atsel = AtomSelection()\n atsel.SetRigid(rigidbody)\n for i in lst:\n sel = rigidbody.SelectResRange(i, i)\n atsel = atsel | sel\n return atsel", "def selectResI(self):\n\n\t\tif len(self.resi) == 0:\t\n\t\t\treturn\n\n\t\ttmplist = []\n\t\tfor atom in self.atomlist:\n\t\t\tfound = False\n\t\t\tfor resi in self.resi:\n\t\t\t\tif int(atom.parentResidue.file_id) == resi:\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\n\t\t\tif found and not self.invresi:\n\t\t\t\ttmplist.append(atom)\n\t\t\tif not found and self.invresi:\n\t\t\t\ttmplist.append(atom)\n\n\t\tself.atomlist = tmplist", "def select(self, chromosomes: ChromList) -> ChromList:\n raise NotImplementedError", "def referenceMultiple(*args):\n num = cmds.intFieldGrp(widgets[\"shotActionRefMultIFG\"], q=True, v1=True)\n path = \"\"\n path = getSelectMasteredAsset()\n\n if path: \n for x in range(num):\n print \"shotWin.referenceAsset: referencing in - - -\", path\n cFuncs.referenceInWithNamespace(path)\n # now refresh. . 
.\n populateMasteredAssets()\n else:\n cmds.warning(\"You must select an asset from the lists to reference in!\")", "def select(self, target):", "def _random_subset(seq,m):\n targets=set()\n while len(targets)<m:\n x=random.choice(seq)\n targets.add(x)\n return targets", "def select_objects(remote, objects_list):\n select_objects = mmapi.vectori();\n for object in objects_list:\n select_objects.push_back(object);\n cmd2 = mmapi.StoredCommands()\n cmd2.AppendSceneCommand_SelectObjects(select_objects)\n remote.runCommand(cmd2)", "def _get_all_sequences(self):\n if self._sequences is None:\n q = \"SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'\"\n self._sequences = set([c.relname for c in self.query(q)])\n return self._sequences", "def mget(cls, pks):\n if not pks:\n return []\n return DBSession().query(cls) \\\n .filter(cls.id.in_(pks)) \\\n .all()", "def rx(target: QubitInput, angle: float) -> Iterable[Instruction]:\n return [Instruction(Rx(angle), target=qubit) for qubit in QubitSet(target)]", "def _query(self):\n # When search matches no minions, salt prints to stdout.\n # Suppress stdout.\n _stdout = sys.stdout\n sys.stdout = open(os.devnull, 'w')\n\n self.local.cmd('*', 'saltutil.pillar_refresh')\n minions = self.local.cmd('*', 'pillar.get', ['minion_nodes'],\n tgt_type=\"compound\")\n sys.stdout = _stdout\n for minion in minions:\n if minions[minion]:\n return minions[minion]\n\n return []", "def get( self, selection=\"\" ):\n structures = []\n for row in self.db.select( selection=selection ):\n structures.append( self.db.get_atoms(id=row.id) )\n return structures", "def plates(self):\n with sql_connection.TRN as TRN:\n sql = \"\"\"SELECT DISTINCT plate_id\n FROM qiita.container\n LEFT JOIN qiita.well USING (container_id)\n WHERE latest_upstream_process_id = %s\n ORDER BY plate_id\"\"\"\n TRN.add(sql, [self.process_id])\n plate_ids = TRN.execute_fetchflatten()\n return [plate_module.Plate(plate_id) for plate_id in plate_ids]", "def _random_subset(seq, m, seed):\n targets = set()\n random.seed(seed)\n\n while len(targets) < m:\n x = random.choice(seq)\n targets.add(x)\n return targets", "def find(self, *args):\n return _ida_frame.xreflist_t_find(self, *args)", "def _random_subset(seq,m):\n targets=random.sample(seq,m)\n return targets", "def sequence_items(self):\r\n seq_css = 'ol#sequence-list>li>a>p'\r\n return self.q(css=seq_css).map(self._clean_seq_titles).results", "def find_xrefs_multi_async(xrefs):\n # The IN operator does multiple sequential queries and ORs them\n # together. This is slow here-- a range query is faster, since\n # this is used to get xrefs for a set of contiguous builds.\n if not xrefs: # nothing => nothing\n raise ndb.Return({})\n xrefs = set(xrefs)\n issues = yield GHIssueDigest.query(\n GHIssueDigest.xref >= min(xrefs),\n GHIssueDigest.xref <= max(xrefs)).fetch_async(batch_size=500)\n refs = {}\n for issue in issues:\n for xref in issue.xref:\n if xref in xrefs:\n refs.setdefault(xref, []).append(issue)\n raise ndb.Return(refs)" ]
[ "0.6281258", "0.57194257", "0.5556252", "0.5519967", "0.51685905", "0.5045356", "0.5012468", "0.48961517", "0.48957306", "0.4891946", "0.4857247", "0.48408654", "0.47919556", "0.47807539", "0.4718705", "0.46947885", "0.46761888", "0.464906", "0.46303573", "0.46256158", "0.4623529", "0.45964462", "0.45765176", "0.456838", "0.45673457", "0.4558694", "0.45476747", "0.45434093", "0.45249045", "0.4518982" ]
0.76783764
0
Select and print all Mip objects for specific sample_id.
def select_mips_for_sample(sample_id): for sam in Samples.objects.filter(sample_fk_id=sample_id): print sam.mip_fk_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_all_mip_objects():\n\n for mip in Mip.objects.all():\n print mip", "def get_sample_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id)\n sample = sample_to_readable(r.json().get('data'))\n md = tableToMarkdown('ThreatGrid - Sample', [sample], [\n 'ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1', 'SHA256', 'OS', 'SubmittedAt', 'StartedAt', 'CompletedAt'\n ])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': sample},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def export_sample_related_mip(args):\n clarity_epp.export.sample.sample_related_mip(lims, args.process_id, args.output_file)", "def get_sample_state_by_id():\n ids = [] # type: list\n if demisto.getArg('ids'):\n ids += argToList(demisto.getArg('ids'))\n if demisto.getArg('id'):\n ids.append(demisto.getArg('id'))\n response = get_sample_state_helper(ids)\n md = tableToMarkdown('ThreatGrid - Sample state', response['samples'], ['ID', 'State'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': response['samples']},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': response['requests']\n })", "def get_pcap_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/network.pcap')\n ec = {'ThreatGrid.Sample.Id': sample_id}\n demisto.results([\n {\n 'Type': entryTypes['note'],\n 'EntryContext': ec,\n 'HumanReadable': '### ThreatGrid Sample Run PCAP File -\\n'\n + 'Your sample run PCAP file download request has been completed successfully for '\n + sample_id,\n 'Contents': ec,\n 'ContentsFormat': formats['json']\n },\n fileResult(sample_id + '-pcap.json', r.content)\n ])", "def get_pmids(path, output):\n from pybel import from_pickle\n graph = from_pickle(path)\n for pmid in get_pubmed_identifiers(graph):\n click.echo(pmid, file=output)", "def select_mips_from_reference_seq(reference_name):\n\n for mip in Mip.objects.filter(reference_id=reference_name):\n print mip", "def samplesim(conn, sample, threshold, fp):\n click.echo('Fingerprint: %s, Threshold: %s' % (fp, threshold))\n cur = conn.cursor()\n mol_ids = sample.read().strip().split('\\n')\n cur.execute(\"set rdkit.tanimoto_threshold=%s;\", (threshold,))\n for i, mol_id in enumerate(mol_ids[:100]):\n click.echo('Query: %s (%s of %s)' % (mol_id, i+1, len(mol_ids)))\n cur.execute(\"select entity_id from chembl_id_lookup where chembl_id = %s\", (mol_id,))\n molregno = cur.fetchone()[0]\n cur.execute(\"select %s from rdk.fps where molregno = %s\", (AsIs(fp), molregno,))\n qfp = cur.fetchone()[0]\n cur.execute(\"select molregno from rdk.fps where %s%%%s\", (AsIs(fp), qfp,))\n results = [r[0] for r in cur.fetchall()]\n chembl_ids = []\n for mrn in results:\n cur.execute(\"select chembl_id from chembl_id_lookup where entity_id = %s and entity_type = 'COMPOUND'\", (mrn,))\n chembl_ids.append(cur.fetchone()[0])\n click.echo(chembl_ids)\n cur.close()\n conn.close()", "def qc_sample_mip(args):\n clarity_epp.qc.sample.set_mip_data_ready(lims, args.process_id)", "def get_processes_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id + '/processes.json')\n ec = {'ThreatGrid.Sample.Id': sample_id}\n demisto.results([\n {\n 'Type': entryTypes['note'],\n 'EntryContext': ec,\n 'HumanReadable': '### ThreatGrid Sample Run Processes File -\\n'\n + 'Your sample run processes file download 
request has been completed successfully for '\n + sample_id,\n 'Contents': r.json(),\n 'ContentsFormat': formats['json']\n },\n fileResult(sample_id + '-processes.json', r.content)\n ])", "def getSampleDetailList(self, study_id):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_sample_detail_list', [study_id, results])\n sample_details = []\n for sample_name, sample_id, public, collection_date, run_prefix, sequence_count, otu_count, otu_percent_hit in results:\n sample_details.append((sample_name, sample_id, public, collection_date, run_prefix, sequence_count, otu_count, otu_percent_hit))\n return sample_details", "def target_sample(n, data = train):\n for i in range(n):\n sample = randint(0, len(data))\n print('Sample: ' + str(sample) + \"\\n\" + 'Target: ' + str(data.iloc[sample][\"target\"]) + '\\n' + 'Text: ' + data.iloc[sample][\"excerpt\"] + '\\n\\n'\n )", "def show_by_sample(request, sample_id, page_num=1):\n\tsample = Sample.objects.get(id=sample_id)\n\tquery = models.AnalysisAttribute.objects.filter(sample=sample)\n\tfiltered = False\n\tif 'showvalue' in request.GET and request.GET['showvalue']:\n\t\tquery = query.filter(value__isnull=False)\t\t\n\t\tfiltered=True\n\tpaginator = Paginator(query.order_by('-date_time_last_updated'), 10)\n\tpage = paginator.page( page_num )\n\tanalyses = page.object_list\n\treturn render_to_response('analysis/show_by_sample.html', locals())", "def sample_set(self):\n sql = \"\"\"SELECT sample_set\n FROM barcodes.sample\n JOIN barcodes.sample_set USING (sample_set_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id])\n return pm.sql.TRN.execute_fetchlast()", "def print_molecule(molecule):\n\n return molecule[\"id\"]", "def pretty(self, sample_size: int = 20):\n collection_size = self._adapter.count()\n\n if sample_size > collection_size:\n sample_size = collection_size\n\n for doc in self._adapter.sample(sample_size):\n doc.pretty()\n print() # newline", "def samples(self, sample_id=None, count=None, offset=None, md5=None, sha1=None, sha256=None,\n format_name=None, format_group_name=None):\n if sample_id:\n logger.debug(\"Get sample\")\n return self._raw_api.samples.get(sample_id)\n\n logger.debug(\"Get list of samples\")\n data = filter_data(\n count=count,\n offset=offset,\n md5=md5,\n sha1=sha1,\n sha256=sha256,\n format_name=format_name,\n format_group_name=format_group_name\n )\n return self._raw_api.samples.get(json=data)", "def case_samples_data(case_id: str, report_api_mip_dna: MipDNAReportAPI):\n return report_api_mip_dna.status_db.get_case_samples_by_case_id(case_internal_id=case_id)", "def sample(self, number_samples: int = 1) -> List[Any]:\n # if prompt is provided, use it\n if self.prompt:\n item = self.model(batch_size=number_samples, prompt=self.prompt)\n else:\n item = self.model(batch_size=number_samples)\n\n # To support old diffusers versions (<0.6.0)\n if DIFFUSERS_VERSION_LT_0_6_0 or self.model_type in [\"geodiff\"]:\n item = item[\"sample\"]\n else:\n item = item.images\n\n return item", "def sample(ctx, projectid, sampleid):\n if ctx.parent.params['cluster'] == 'bianca':\n #in this case I need to open a sftp connection in order to avoid to insert password everytime\n projectObj = _deliver_castor.CastorProjectDeliverer(projectid, **ctx.parent.params)\n projectObj.create_sftp_connnection()\n #create the project folder in the remote server\n #move to delivery folder\n projectObj.sftp_client.chdir(projectObj.config.get('castordeliverypath', 
'/wharf'))\n projectObj.sftp_client.mkdir(projectid, ignore_existing=True)\n #move inside the project folder\n projectObj.sftp_client.chdir(projectid)\n for sid in sampleid:\n if ctx.parent.params['cluster'] == 'milou':\n d = _deliver.SampleDeliverer(\n projectid,\n sid,\n **ctx.parent.params)\n elif ctx.parent.params['cluster'] == 'mosler':\n d = _deliver_mosler.MoslerSampleDeliverer(\n projectid,\n sid,\n **ctx.parent.params)\n elif ctx.parent.params['cluster'] == 'bianca':\n d = _deliver_castor.CastorSampleDeliverer(\n projectid,\n sid,\n sftp_client=projectObj.sftp_client,\n **ctx.parent.params)\n elif ctx.parent.params['cluster'] == 'grus':\n logger.error(\"When delivering to grus only project can be specified, not sample\")\n return 1\n _exec_fn(d, d.deliver_sample)\n if ctx.parent.params['cluster'] == 'bianca':\n projectObj.close_sftp_connnection()", "def get_summary_by_id():\n sample_id = demisto.getArg('id')\n request = req('GET', SUB_API + 'samples/' + sample_id + '/summary')\n r = request.json()\n sample = {'ID': sample_id, 'AnalysisSummary': [], 'ArtifactsCount': []}\n\n # Search submissions request for extra information\n sub_request = req('GET', SUB_API + 'search/submissions',\n params={'api_key': API_KEY, 'q': demisto.get(r, 'data.sha256')})\n sub_r = sub_request.json()\n sub_r_first_item = demisto.get(sub_r, 'data.items')[0]\n\n sample['AnalysisSummary'] = {\n 'RegistryCount': demisto.get(r, 'data.registry_count'),\n 'FileName': demisto.get(r, 'data.filename'),\n 'SHA256': demisto.get(r, 'data.sha256'),\n 'SampleType': demisto.get(sub_r_first_item, 'item.analysis.metadata.submitted_file.type'),\n 'FirstSeen': demisto.get(r, 'data.first_seen'),\n 'LastSeen': demisto.get(r, 'data.last_seen'),\n }\n sample['ArtifactsCount'] = {\n 'Network': demisto.get(r, 'data.artifacts.network'),\n 'Disk': demisto.get(r, 'data.artifacts.disk'),\n 'Memory': demisto.get(r, 'data.artifacts.memory'),\n 'Extracted': demisto.get(r, 'data.artifacts.extracted')\n }\n md = tableToMarkdown('ThreatGrid - Sample Summary for ' + sample_id,\n [sample['AnalysisSummary']], ['RegistryCount', 'FileName',\n 'SHA256', 'SampleType', 'FirstSeen', 'LastSeen'])\n md += tableToMarkdown('ThreatGrid - Sample Artifacts', [sample['ArtifactsCount']],\n ['Network', 'Disk', 'Memory', 'Extracted'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': sample},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def sample(processDetail):\n return ProcessCPUPoller.sample(processDetail)", "def get_samples_from_patient_id(patient_id):\n all_files = FileRepository.all()\n q_pid = Q(metadata__cmoPatientId=patient_id)\n q_fg = build_argos_file_groups_query()\n q = q_pid & q_fg\n files = FileRepository.filter(queryset=all_files, q=q, filter_redact=True)\n data = list()\n for current_file in files:\n sample = dict()\n sample[\"id\"] = current_file.file.id\n sample[\"path\"] = current_file.file.path\n sample[\"file_name\"] = current_file.file.file_name\n sample[\"metadata\"] = current_file.metadata\n data.append(sample)\n\n samples = list()\n # group by igoId\n igo_id_group = dict()\n for sample in data:\n igo_id = sample[\"metadata\"][settings.SAMPLE_ID_METADATA_KEY]\n if igo_id not in igo_id_group:\n igo_id_group[igo_id] = list()\n igo_id_group[igo_id].append(sample)\n\n for igo_id in igo_id_group:\n samples.append(build_sample(igo_id_group[igo_id]))\n samples, bad_samples = remove_with_caveats(samples)\n number_of_bad_samples = 
len(bad_samples)\n if number_of_bad_samples > 0:\n LOGGER.warning(\"Some samples for patient query %s have invalid %i values\", patient_id, number_of_bad_samples)\n return samples", "def pull_sample_data(self, sample_id: str, sample_size: int or None = None, include_controls: bool = True,\n output_format: str = 'dataframe', columns_default: str = 'marker') -> None or list:\n db = connection.get_db(alias='core')\n db_name = db.name\n file_grp = self.pull_sample(sample_id)\n if not file_grp:\n return None\n files = file_grp.files\n # Fetch data\n if not include_controls: # Fetch data for primary file only\n file_id = [f for f in files if f.file_type == 'complete'][0].file_id\n connection.disconnect('core')\n connection._connections = {}\n connection._connection_settings = {}\n connection._dbs = {}\n FileGroup._collection = None\n\n complete = data_from_file(file_id=file_id,\n filegrp_id=file_grp.id,\n db_name=db_name,\n sample_size=sample_size,\n output_format=output_format,\n columns_default=columns_default)\n connection.connect(db=db_name, alias='core')\n return [complete]\n # Fetch data for primary file & controls\n files = [f.file_id for f in file_grp.files]\n connection.disconnect('core')\n connection._connections = {}\n connection._connection_settings = {}\n connection._dbs = {}\n FileGroup._collection = None\n\n pool = Pool(cpu_count())\n f = partial(data_from_file,\n filegrp_id=file_grp.id,\n db_name=db_name,\n sample_size=sample_size,\n output_format=output_format,\n columns_default=columns_default)\n data = pool.map(f, files)\n pool.close()\n pool.join()\n connection.connect(db=db_name, alias='core')\n return data", "def getAllSampleFields(self, sample_id, study_id):\n sample_tables = []\n sample_tables.append('sample')\n sample_tables.append('common_fields')\n sample_tables.append('extra_sample_')\n sample_tables.append('air')\n sample_tables.append('other_environment')\n sample_tables.append('sediment')\n sample_tables.append('soil')\n sample_tables.append('wastewater_sludge')\n sample_tables.append('water')\n sample_tables.append('host_assoc_vertibrate')\n sample_tables.append('host_associated_plant')\n sample_tables.append('human_associated')\n sample_tables.append('host_sample')\n sample_tables.append('host')\n \n filled_fields = {}\n \n con = self.getMetadataDatabaseConnection()\n cursor = con.cursor()\n \n for table in sample_tables:\n if 'extra_sample_' in table:\n statement = 'select * from %s%s where sample_id = %s' % (table, study_id, sample_id)\n elif table == 'host':\n statement = 'select * from host h inner join host_sample hs on h.host_id = hs.host_id where sample_id = %s' % sample_id\n else:\n statement = 'select * from %s where sample_id = %s' % (table, sample_id)\n \n try:\n cursor.execute(statement)\n except Exception, e:\n print str(e)\n print 'Error running query:\\n'\n print statement\n print 'Running next query...\\n'\n \n continue\n \n data = cursor.fetchall()\n\n # Get the column names\n col_names = []\n for i in range(0, len(cursor.description)):\n col_names.append(cursor.description[i][0])\n \n # Find the rows with data\n for row in data:\n i = 0\n for field in row:\n if field != None and field != '':\n filled_fields[col_names[i]] = field\n i += 1\n \n return filled_fields", "def pull_sample_mappings(self, sample_id: str):\n file_grp = self.pull_sample(sample_id)\n if not file_grp:\n return None\n mappings = dict()\n for f in file_grp.files:\n mappings[f.file_id] = [m.to_python() for m in f.channel_mappings]\n return mappings", "def print_item(group):\n 
print(\"\\tName: {}\".format(group.name))\n print(\"\\tId: {}\".format(group.id))\n if hasattr(group, 'location'):\n print(\"\\tLocation: {}\".format(group.location))\n print_properties(getattr(group, 'properties', None))", "def __getitem__(self, pid):\n pid = int(pid)\n list_of_samples = self.samples[pid][\n :\n ] # path, target, camid, idx <- in each inner tuple\n _len = len(list_of_samples)\n assert (\n _len > 1\n ), f\"len of samples for pid: {pid} is <=1. len: {len_}, samples: {list_of_samples}\"\n\n if _len < self.num_instances:\n choice_size = _len\n needPad = True\n else:\n choice_size = self.num_instances\n needPad = False\n\n # We shuffle self.samples[pid] as we extract instances from this dict directly\n random.shuffle(self.samples[pid])\n\n out = []\n for _ in range(choice_size):\n tup = self.samples[pid].pop(0)\n path, target, camid, idx = tup\n img = self.prepare_img(path)\n out.append(\n (img, target, camid, idx, True)\n ) ## True stand if the sample is real or mock\n\n if needPad:\n num_missing = self.num_instances - _len\n assert (\n num_missing != self.num_instances\n ), f\"Number of missings sample in the batch is equal to num_instances. PID: {pid}\"\n if self.resample:\n assert len(list_of_samples) > 0\n resampled = np.random.choice(\n range(len(list_of_samples)), size=num_missing, replace=True\n )\n for idx in resampled:\n path, target, camid, idx = list_of_samples[idx]\n img = self.prepare_img(path)\n out.append((img, target, camid, idx, True))\n else:\n img_mock = torch.zeros_like(img)\n for _ in range(num_missing):\n out.append((img_mock, target, camid, idx, False))\n\n assert (\n len(out) == self.num_instances\n ), f\"Number of returned tuples per id needs to be equal self.num_instance. It is: {len(out)}\"\n\n return out", "def fixture_samples(sample_single) -> Iterator[dict]:\n _samples = []\n sample_id = sample_single[\"sample_id\"]\n for number in range(3):\n sample = copy.deepcopy(sample_single)\n sample[\"sample_id\"] = \"_\".join([sample_id, str(number)])\n _samples.append(sample)\n return _samples", "def get_samples():\n r = req('GET', SUB_API + 'samples', params=handle_filters())\n samples = []\n for k in demisto.get(r.json(), 'data.items'):\n samples.append(sample_to_readable(k))\n md = tableToMarkdown('ThreatGrid - List of Samples', samples, [\n 'ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1', 'SHA256', 'OS', 'SubmittedAt', 'StartedAt', 'CompletedAt'\n ])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': samples},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })" ]
[ "0.6150542", "0.56571805", "0.5590531", "0.54004705", "0.5333475", "0.53170794", "0.5293591", "0.5280334", "0.5169264", "0.5124405", "0.5093849", "0.5081533", "0.50650054", "0.5033113", "0.50301886", "0.4978106", "0.49227232", "0.48732486", "0.48366436", "0.48184904", "0.4813927", "0.4809194", "0.48084804", "0.48024577", "0.47892326", "0.478218", "0.47573185", "0.47539398", "0.47500417", "0.4732467" ]
0.69826764
0
Returns the ith score, counting from 1.
def getScore(self, i): return self.scores[i - 1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, n):\r\n \r\n if self.scores:\r\n return self.scores[n]\r\n else:\r\n return None", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def score(hand):\n occurrences = [] \n for die in hand:\n if die > len(occurrences):\n occurrences.extend([0 for dummy_idx in range(len(occurrences) ,die)]) \n occurrences[die - 1] += 1\n maxi = 0\n for idx in range(len(occurrences)):\n if (idx+1) * occurrences[idx] > maxi:\n maxi = (idx + 1) * occurrences[idx]\n return maxi", "def updateScore(score):\n return score + 1", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score", "def get_score(self, score_index) -> float:\n return self._scores[score_index - 1]", "def score(self) -> int:\n return self._score", "def best_score(scores):\n idx, score = sorted(\n enumerate(scores), key=lambda e: e[1], reverse=scores[0].higher_better\n )[0]\n return (idx + 1, score)", "def score(name):\r\n return (sorted(test).index(name)+1)*value(name)", "def score():\n factor = 10\n current = (qno - wrong - 1) * factor\n return current", "def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score", "def get_score(self, card_index: int = 0) -> int:\n return self.get_score_list[card_index]", "def score(self) -> int:\n card_values = {\n '0': 0,\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'JACK': 10,\n 'QUEEN': 10,\n 'KING': 10,\n 'ACE': 11}\n hand_value = []\n for i in self.cards:\n hand_value.append(card_values[i.value])\n while sum(hand_value) > 21 and 11 in hand_value:\n for i, j in enumerate(hand_value):\n if j == 11:\n hand_value[i] = 1\n break\n else:\n pass\n return sum(hand_value)", "def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result", "def score(self):", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def get_score(self) -> int:\n return self.rstate.score()", "def score(self) -> int:\n return self.__state.score()", "def fn(i):\n if i < 0: return 0 # boundary condition \n return scores[i] + max((fn(ii) for ii in range(i) if ages[ii] == ages[i] or scores[ii] <= scores[i]), default=0)", "def count(self, i):\n return sum([1 for j in self if i==j])", "def getScore(self,board):\n return board.getScore()[self.tile]", "def score(self):\n result = 0\n\n idx = self.cups.index(1)\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n # ok, keep adding things until we get back to 1\n while 1 != self.cups[idx]:\n # add this value..\n result *= 10\n result += self.cups[idx]\n # and on to the next one..\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n\n return result", "def getOccurence(self) -> int:\n ...", "def score(state, first_n):\n\n return sum([(i + first_n) for i, letter in enumerate(state) if letter == \"#\"])", "def majorityElement(self, nums: List[int]) -> int:\n num_count = dict()\n nums_length = len(nums)\n\n for i in 
nums:\n\n if dict.get(num_count, i):\n num_count[i] += 1\n else:\n num_count[i] = 1\n\n if num_count[i] >= nums_length / 2:\n return i", "def _score(self, x, seq):\n pass", "def increase_score(self):\n self.score += 1", "def getScore(self):\r\n return self._score", "def get_score(self):\n return self.score" ]
[ "0.6945054", "0.6740462", "0.65219384", "0.64139384", "0.6372708", "0.6367158", "0.6245017", "0.6234426", "0.62299573", "0.6182613", "0.61144584", "0.60755754", "0.60625494", "0.602287", "0.6008089", "0.59876007", "0.5977155", "0.593884", "0.5938777", "0.59238005", "0.5923437", "0.5920986", "0.5909377", "0.5903191", "0.58898", "0.58632946", "0.58521956", "0.5839959", "0.5822736", "0.5820191" ]
0.7393736
0
Returns the average score.
def getAverage(self): return sum(self.scores) / len(self.scores)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)", "def get_avg_score(game_id):\r\n\r\n scores = []\r\n game = Game.query.get(game_id)\r\n for rating in game.ratings:\r\n scores.append(rating.score)\r\n \r\n avg_score = sum(scores)/len(scores)\r\n \r\n \r\n return avg_score", "def get_average(self):\n self.avg = math.floor((self.maths + self.phy + self.che) / 3, )\n self.assign_grade()\n return self.avg\n # End of method get_average", "def _find_average_score(self, sentenceValue):\n sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def average(self):\n return self.summation() / self.count()", "def avg_e_score(self, entity):\n return float(entity['es']) / float(entity['count'])", "def average_score(self, sentenceValue):\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n\r\n # Average value of a sentence from original summary_text\r\n average = (sumValues / len(sentenceValue))\r\n\r\n return average", "def average_score(sentence_scores):\r\n sumValues = 0\r\n for score in sentence_scores:\r\n sumValues += sentence_scores[score]\r\n\r\n # Average value of a sentence from original text\r\n average = (sumValues / len(sentence_scores))\r\n\r\n return average", "def average_rating(self):\n ratings = Rating.objects.filter(game=self)\n\n if len(ratings):\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.value\n\n # Calculate the averge and return it.\n average = total_rating / len(ratings)\n return average\n\n # else: \n return 0", "def getAvg(self):\r\n\t\treturn self.data['avg']", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3", "def getAvg(self):\r\n\t\tdata = self.pair.data\r\n\t\tif data['avg'] == None:\r\n\t\t\treturn None\r\n\t\treturn 1. 
/ self.pair.data['avg']", "def calc_mean_score(movies: List[Movie]) -> float:\n return round(sum([m.score for m in movies]) / len(movies), 1)", "def global_average_scores(self):\n\n return np.mean(self.average_scores_all_subjects(), axis=0)", "def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None", "def calculate_avg_score(state_score,state_count):\n\tfor state in state_score.keys():\n\t\tstate_score[state] = 1.*state_score[state]/state_count[state]\n\treturn state_score", "def get_average_rating(self):\n count = 0\n total = 0\n ratings_length = len(self.ratings)\n if ratings_length > 0:\n for rating in self.ratings:\n count += 1\n total += rating\n average = total / count\n return average\n else:\n print(\"There does not seem to be any ratings for {book}\".format(book=self.title))", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3 + self.rating_4 + self.rating_5 + self.rating_6 + self.rating_7) / 7", "def average(self):\n return self.properties.get('average')", "def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average", "def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / float(self._cur_elem_count)\n self._mtx.release()\n return avg", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def average(self):\n return (self.current + self.last) / 2.0", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk", "def get_score(self):\n rewards, resets = self.runner.get_rewards_resets()\n self.runner.clear_rewards_resets()\n assert rewards.ndim == 1 and resets.ndim == 1, (rewards.ndim, resets.ndim)\n assert rewards.shape[0] == resets.shape[0], (rewards.shape, resets.shape)\n scores = [0]\n for t in reversed(range(rewards.shape[0])):\n if resets[t]:\n scores.append(0)\n scores[-1] += rewards[t]\n return np.mean(scores)" ]
[ "0.83748543", "0.82793206", "0.79504865", "0.79358387", "0.7766926", "0.76965094", "0.7663437", "0.7629189", "0.75364333", "0.7511791", "0.7506461", "0.750374", "0.745132", "0.744752", "0.7421295", "0.7383909", "0.73567045", "0.7320738", "0.73039734", "0.72678035", "0.7239264", "0.7208551", "0.7194458", "0.7142921", "0.71422815", "0.71422815", "0.71422815", "0.7129805", "0.7129805", "0.71111965" ]
0.85833955
0
Returns the highest score.
def getHighScore(self): return max(self.scores)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_score(self):\n return max(self._extract_set('score') or [0])", "def max_score(self):\r\n return self.lcp.get_max_score()", "def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None", "def max_score(self):\r\n max_score = None\r\n if self.check_if_done_and_scored():\r\n max_score = self._max_score\r\n return max_score", "def get_high_score(self) -> float:\n return max(self._scores)", "def max_score(self):\n return self.points", "def __get_best_score(scores):\n best = max(scores.items(), key=operator.itemgetter(1))[0]\n print(\"The best classification for this corpus is: \" + str(best))\n return best", "def personal_best(scores):\n return max(scores)", "def max_score(self):\n return self.raw_possible", "def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)", "def personal_best(scores: list) -> int:\n return max(scores)", "def get_score(self):\n return np.max(self._scores) if self._scores is not None else self._score_history[-1]", "def maximum_score(self):\n max_score = self.values('question').annotate(\n top_answer=Max('score')\n )\n max_score = sum(d['top_answer'] for d in max_score)\n return max_score", "def get_max_score(self):\r\n maxscore = 0\r\n for responder in self.responders.values():\r\n maxscore += responder.get_max_score()\r\n return maxscore", "def max(scores):\n return __builtin__.max(scores) if len(scores) else 0", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def getMaxAlignmentScore(self):\n # get max of each row\n # max_scores = [max(i) for i in self.matrix]\n\n # return the max of the max vaules\n return numpy.max(self.matrix)", "def worst_score(self):\r\n pass", "def get_highscore(self, score):\n scores = list(self.history_score.values())\n \n # Compare current score with the last placing in leaderboard.\n if score > max(scores):\n return 0\n else:\n if score < min(scores):\n return 2\n else:\n return 1", "def get_best( self ):\n if len(self.listScore) < 1:\n if self.bMinimumIsBest: return 9999,\"Unknown\"\n else: return -1,\"Unknown\"\n return self.listScore[0]", "def latest(scores: list) -> int:\n return scores[-1]", "def latest_score(self):\r\n if not self.child_history:\r\n return None\r\n return self.child_history[-1].get('score')", "def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore", "def get_player_best_score(self, player):\n return self.get_highscores().filter(player=player).first()", "def highest_rank(self):\n return max(self.cards).rank", "def test_get_max_score(self):\r\n max_score = self.peer_grading.max_score()\r\n self.assertEquals(max_score, None)", "def pwm_max_score(self):\n if self.max_score is None:\n score = 0\n for row in self.pwm:\n score += log(max(row) / 0.25 + 0.01)\n self.max_score = score\n \n return self.max_score", "def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) 
== [0, None, None, None]\n return", "def get_best_score():\n\n best_score = db.session.query(func.max(Game.score)).filter(Game.status == \"won\").first()\n score_user = db.session.query(Game.score, User.username).join(User).filter(Game.score == best_score, Game.status == \"won\").first()\n\n return score_user", "def latest_score(self):\r\n if not self.child_history:\r\n return None\r\n return self.score_for_attempt(-1)" ]
[ "0.8818188", "0.84807724", "0.835259", "0.8324772", "0.82806855", "0.81326896", "0.80440915", "0.79591155", "0.7943476", "0.7934118", "0.785137", "0.7810449", "0.7779032", "0.76996386", "0.7575776", "0.7553467", "0.7516782", "0.74972826", "0.74240077", "0.74194306", "0.7411322", "0.7405073", "0.73216486", "0.7305021", "0.72841513", "0.7274494", "0.7257", "0.72416824", "0.7188179", "0.71538484" ]
0.8535671
1
Tweaks the continuum by randomly removing units ("false negatives"). Every unit (for each annotator) has a probability equal to the magnitude of being removed. If this probability is one, a single random unit (for each annotator) will be left alone.
def false_neg_shuffle(self, continuum: Continuum) -> None: for annotator in continuum.annotators: security = np.random.choice(continuum._annotations[annotator]) # security : if an annotator doesnt have any annotations gamma cant be computed. for unit in list(continuum[annotator]): if np.random.random() < self.magnitude: continuum.remove(annotator, unit) if len(continuum._annotations[annotator]) == 0: continuum.add(annotator, security.segment, security.annotation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def false_pos_shuffle(self, continuum: Continuum) -> None:\n ref_units = self._reference_continuum[self._reference_annotator]\n avg_dur = np.average([unit.segment.end - unit.segment.start for unit in ref_units])\n var_dur = np.std([unit.segment.end - unit.segment.start for unit in ref_units])\n category_weights = self._reference_continuum.category_weights\n bounds_inf, bounds_sup = self._reference_continuum.bound_inf, self._reference_continuum.bound_sup\n for annotator in continuum.annotators:\n for _ in range(int(self.magnitude * self.FALSE_POS_FACTOR * len(self._reference_continuum))):\n # a random unit is generated from a (all random) central point, duration, and category\n category = np.random.choice(category_weights.keys(), p=category_weights.values())\n center = np.random.uniform(bounds_inf, bounds_sup)\n duration = abs(np.random.normal(avg_dur, var_dur))\n continuum.add(annotator,\n Segment(center - duration / 2, center + duration / 2),\n annotation=category)", "def mute(individual):\n mutatePt=random.randint(0,len(individual)-1)\n if mutatePt==0:\n individual[mutatePt]=random.uniform(kNN.features_min[0], kNN.features_max[0])\n elif mutatePt==2:\n individual[mutatePt]=random.uniform(kNN.features_min[1], kNN.features_max[1])\n elif mutatePt==3:\n individual[mutatePt]=random.uniform(kNN.features_min[2], kNN.features_max[2])\n elif mutatePt==4:\n individual[mutatePt]=random.uniform(kNN.features_min[3], kNN.features_max[3])\n elif mutatePt==5:\n individual[mutatePt]=random.uniform(kNN.features_min[4], kNN.features_max[4])\n\n return individual,", "def shift_shuffle(self, continuum: Continuum) -> None:\n shift_max = self.magnitude * self.SHIFT_FACTOR * \\\n self._reference_continuum.avg_length_unit\n for annotator in continuum.annotators:\n for unit in continuum[annotator]:\n continuum.remove(annotator, unit)\n start_seg, end_seg = 0.0, 0.0\n while start_seg >= end_seg:\n start_seg = unit.segment.start + np.random.uniform(-1, 1) * shift_max\n end_seg = unit.segment.end + np.random.uniform(-1, 1) * shift_max\n continuum.add(annotator, Segment(start_seg, end_seg), unit.annotation)", "def test_no_cosmics(self):\n prng = np.random.RandomState(84287)\n y = prng.normal(size=1000)\n\n y2 = remove_cosmics(y)\n\n np.testing.assert_array_equal(y, y2)", "def splits_shuffle(self, continuum: Continuum):\n for _ in range(int(self.magnitude *\n self.SPLIT_FACTOR *\n self._reference_continuum.avg_num_annotations_per_annotator)):\n for annotator in continuum.annotators:\n units = continuum._annotations[annotator]\n to_split = units.pop(numpy.random.randint(0, len(units)))\n security = (to_split.segment.end - to_split.segment.start) * 0.01\n cut = numpy.random.uniform(to_split.segment.start + security, to_split.segment.end)\n\n\n try:\n continuum.add(annotator, Segment(cut, to_split.segment.end), to_split.annotation)\n continuum.add(annotator, Segment(to_split.segment.start, cut), to_split.annotation)\n except ValueError:\n continuum.add(annotator, to_split.segment, to_split.annotation)\n continuum.add(annotator, to_split.segment, to_split.annotation)", "def beat_stripper(timings, removal_proportion):\n phrase_len = len(timings)\n n_to_rm = int(phrase_len * removal_proportion)\n rm_idx = np.random.randint(0, phrase_len-1, n_to_rm)\n\n timings_stripped = []\n for i, hit in enumerate(timings):\n if not i in rm_idx:\n timings_stripped.append(hit)\n else:\n timings_stripped.append(hit + hit)\n return timings_stripped", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = 
torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n repeat_ = num_expected // neg_inds.numel()\n return torch.cat((neg_inds.repeat(repeat_), self.random_choice(neg_inds, num_expected % neg_inds.numel())))\n else:\n return self.random_choice(neg_inds, num_expected)", "def test_cosmic_at_beginning_ignored(self):\n prng = np.random.RandomState(123459)\n y = prng.normal(size=1000)\n\n y[0] = -14000.\n\n y2 = remove_cosmics(y)\n\n # assert that all the points are unchanged\n np.testing.assert_array_equal(y, y2)", "def unrandomize(self):\n\t\tif not self.hasRandomizedAmplitudes:\n\t\t\tprint \"Warning: 'unrandomize' called, but has not been randomized\"\n\t\tfor i in range(self.totalBins):\n\t\t\tself.reals[i] -= self.randoms[2*i ]\n\t\t\tself.imags[i] -= self.randoms[2*i+1]\n\t\tself.hasRandomizedAmplitudes = False\n\t\tdel self.randoms", "def cointoss():\n return random.random() < 0.5", "def _u_naught_simple(self):\n # Random is better to give different multipliers in the subgradient phase\n return np.random.rand(self.mrows)*1.", "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "def remove_functional_groups(pct_C1A, pct_E1A, pct_H1A, atom_list):\n carboxyl_map = get_carboxyl_map(atom_list)\n epoxy_map = get_epoxy_map(atom_list)\n hydroxyl_map = get_hydroxyl_map(atom_list)\n remove_C1A = round(len(carboxyl_map) * pct_C1A)\n remove_E1A = round(len(epoxy_map) * pct_E1A)\n remove_H1A = round(len(hydroxyl_map) * pct_H1A)\n while (remove_C1A > 0):\n remove_C1A -= 1\n remove_group = random.choice(carboxyl_map)\n carboxyl_map.remove(remove_group)\n for element in remove_group:\n atom_list.remove(element)\n del element\n while (remove_E1A > 0):\n remove_E1A -= 1\n remove_group = random.choice(epoxy_map)\n epoxy_map.remove(remove_group)\n for element in remove_group:\n atom_list.remove(element)\n del element\n while (remove_H1A > 0):\n remove_H1A -= 1\n remove_group = random.choice(hydroxyl_map)\n hydroxyl_map.remove(remove_group)\n for element in remove_group:\n atom_list.remove(element)\n del element\n return atom_list", "def create_neg_sample_list(word_counts):\n negatives = []\n pow_freq = np.array(list(word_counts.values()))**0.75\n sum_pow_freq = np.sum(pow_freq)\n ratio = pow_freq / sum_pow_freq\n count = np.round(ratio * 1e6)\n max_sample_id = len(count)\n for wid, c in enumerate(count):\n negatives += [wid] * int(c)\n negatives = np.array(negatives)\n np.random.shuffle(negatives)\n return negatives", "def unforeseen():\r\n return random.gauss(300., 100.)", "def remove_chromosome(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n del mutated_genome[index]", "def reset(self):\n self.differences = np.zeros(self.differences.shape)\n r_i = np.random.randint(len(self.differences))\n self.differences[r_i] = .001 # This will automatically get selected as the BMU next time", "def randomize(self):\n random_pfm = [[c for c in row] for row in self.pfm]\n random.shuffle(random_pfm)\n m = Motif(pfm=random_pfm)\n m.id = \"random\"\n return m", "def draw_negative_sample(c):\n caption = c\n while True:\n assert len(captions) >= 2 # in case we ran out...\n capt_idx = r.randint(len(captions))\n caption = captions[capt_idx]\n if caption == c:\n continue\n if 
len(idxes_by_caption[caption]) == 0:\n del captions[capt_idx]\n del idxes_by_caption[caption]\n continue\n # print('caption', caption)\n res = idxes_by_caption[caption].pop()\n if len(idxes_by_caption[caption]) == 0:\n del idxes_by_caption[caption]\n del captions[capt_idx]\n return res", "def remove_n_nos(self, num_nos):\n for i in range(num_nos):\n elem = random.randint(1, 11 ** 4)\n self.remove(elem)", "def sample_bernoulli(self, probabilities):\n return tf.nn.relu(tf.sign(probabilities - tf.random.uniform(probabilities.shape)))", "def direction_correction(self):\n self.directions.monster = random.uniform(self.directions.monster * self.get_monster_sensitivity(),\n self.directions.monster * (1 + (1 - self.get_monster_sensitivity())))\n self.directions.food = random.uniform(self.directions.food * self.get_food_sensitivity(),\n self.directions.food * (1 + (1 - self.get_food_sensitivity())))\n self.directions.water = random.uniform(self.directions.water * self.get_water_sensitivity(),\n self.directions.water * (1 + (1 - self.get_water_sensitivity())))", "def test_text_classifier_del_testing_samples(self):\n pass", "def cull(self):\n num = 0\n stat = 1\n i = 0\n while num < self.num // 2:\n if random.random() < stat and self.genepool[0][i%self.num] != None:\n num += 1\n self.genepool[0][i%self.num] = None\n self.genepool[1][i%self.num] = None\n stat -= 1.5*stat/self.num\n i += 1", "def cull(self):\n num = 0\n stat = 1\n i = 0\n while num < self.num // 2:\n if random.random() < stat and self.genepool[0][i%self.num] != None:\n num += 1\n self.genepool[0][i%self.num] = None\n self.genepool[1][i%self.num] = None\n stat -= 1.5*stat/self.num\n i += 1", "def test_deterministic(self):\n add_noise = self.variant(exploration.add_dirichlet_noise)\n\n # Test that noisy and noisless actions match for zero Dirichlet noise\n for _ in range(10):\n prior = np.random.normal(0., 1., (self._batch_size, self._num_actions))\n\n # Test output.\n self._rng_key, key = jax.random.split(self._rng_key)\n noisy_prior = add_noise(\n key, prior, dirichlet_alpha=0.3, dirichlet_fraction=0.)\n np.testing.assert_allclose(prior, noisy_prior)", "def sample_skills_to_be_covered(self, fraction=1.0):\n self.skills_covered = np.zeros(self.num_skills)\n if fraction < 1.0:\n num_sampled_skills = int(fraction * self.num_skills)\n sampled_skills = np.random.choice(self.num_skills, size=num_sampled_skills, replace=False)\n\n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)", "def exclude_ice_trap_misery(weight_dict, random_settings):\n weights = weight_dict['junk_ice_traps']\n if 'mayhem' in weights.keys() and random_settings['damage_multiplier'] in ['quadruple', 'ohko']:\n weights.pop('mayhem')\n if 'onslaught' in weights.keys() and random_settings['damage_multiplier'] in ['quadruple', 'ohko']:\n weights.pop('onslaught')\n random_settings['junk_ice_traps'] = random.choices(list(weights.keys()), weights=list(weights.values()))[0]", "def word_dropout(tokens, dropout):\n return [constant.UNK_ID if x != constant.UNK_ID and np.random.random() < dropout \\\n else x for x in tokens]", "def remove_enemies(level, amount):\n enemy_pos = get_positions(level, ENEMY1)\n enemy_pos += get_positions(level, ENEMY2)\n enemy_pos += get_positions(level, ENEMY3)\n\n random.shuffle(enemy_pos)\n for _ in range(min(len(enemy_pos), amount)):\n pos = enemy_pos.pop()\n level[pos] = EMPTY" ]
[ "0.636054", "0.59033686", "0.584821", "0.5758323", "0.56914926", "0.5662757", "0.56101835", "0.56001", "0.54565287", "0.5447088", "0.53587425", "0.5317047", "0.5300742", "0.5276981", "0.52715397", "0.5239773", "0.52153003", "0.52076566", "0.51970094", "0.51903534", "0.517286", "0.5161747", "0.51581156", "0.5157873", "0.5157873", "0.5149622", "0.51486284", "0.5137292", "0.513711", "0.51327884" ]
0.74843484
0
Tweaks the continuum by randomly adding "false positive" units. The number of added units per annotator is constant & proportional to the magnitude of the CST. The chosen category is random and depends on the probability of occurrence of the category in the reference. The length of the segment is random (normal distribution) based on the average and standard deviation of those of the reference.
def false_pos_shuffle(self, continuum: Continuum) -> None: ref_units = self._reference_continuum[self._reference_annotator] avg_dur = np.average([unit.segment.end - unit.segment.start for unit in ref_units]) var_dur = np.std([unit.segment.end - unit.segment.start for unit in ref_units]) category_weights = self._reference_continuum.category_weights bounds_inf, bounds_sup = self._reference_continuum.bound_inf, self._reference_continuum.bound_sup for annotator in continuum.annotators: for _ in range(int(self.magnitude * self.FALSE_POS_FACTOR * len(self._reference_continuum))): # a random unit is generated from a (all random) central point, duration, and category category = np.random.choice(category_weights.keys(), p=category_weights.values()) center = np.random.uniform(bounds_inf, bounds_sup) duration = abs(np.random.normal(avg_dur, var_dur)) continuum.add(annotator, Segment(center - duration / 2, center + duration / 2), annotation=category)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def false_neg_shuffle(self, continuum: Continuum) -> None:\n for annotator in continuum.annotators:\n security = np.random.choice(continuum._annotations[annotator])\n # security : if an annotator doesnt have any annotations gamma cant be computed.\n for unit in list(continuum[annotator]):\n if np.random.random() < self.magnitude:\n continuum.remove(annotator, unit)\n if len(continuum._annotations[annotator]) == 0:\n continuum.add(annotator, security.segment, security.annotation)", "def shift_shuffle(self, continuum: Continuum) -> None:\n shift_max = self.magnitude * self.SHIFT_FACTOR * \\\n self._reference_continuum.avg_length_unit\n for annotator in continuum.annotators:\n for unit in continuum[annotator]:\n continuum.remove(annotator, unit)\n start_seg, end_seg = 0.0, 0.0\n while start_seg >= end_seg:\n start_seg = unit.segment.start + np.random.uniform(-1, 1) * shift_max\n end_seg = unit.segment.end + np.random.uniform(-1, 1) * shift_max\n continuum.add(annotator, Segment(start_seg, end_seg), unit.annotation)", "def randomCuts(self,g,Nb):\n # A1 ~ Unif[0,N-1-(Nc-1)(g-1)[\n A1 = np.random.randint(1, self.numMonomers-1-(Nb-1)*(g+1)-1)\n return A1 + np.arange(Nb)*(1+g)", "def get_rand_cat(self):\n return randint(0,GAConfig[\"num_categories\"]-1)", "def splits_shuffle(self, continuum: Continuum):\n for _ in range(int(self.magnitude *\n self.SPLIT_FACTOR *\n self._reference_continuum.avg_num_annotations_per_annotator)):\n for annotator in continuum.annotators:\n units = continuum._annotations[annotator]\n to_split = units.pop(numpy.random.randint(0, len(units)))\n security = (to_split.segment.end - to_split.segment.start) * 0.01\n cut = numpy.random.uniform(to_split.segment.start + security, to_split.segment.end)\n\n\n try:\n continuum.add(annotator, Segment(cut, to_split.segment.end), to_split.annotation)\n continuum.add(annotator, Segment(to_split.segment.start, cut), to_split.annotation)\n except ValueError:\n continuum.add(annotator, to_split.segment, to_split.annotation)\n continuum.add(annotator, to_split.segment, to_split.annotation)", "def category_shuffle(self, continuum: Continuum,\n overlapping_fun: Callable[[str, str], float] = None,\n prevalence: bool = False):\n category_weights = self._reference_continuum.category_weights\n # matrix \"A\"\n nb_categories = len(category_weights)\n prob_matrix = np.eye(nb_categories)\n # matrix \"B or C\"\n if prevalence:\n sec_matrix = np.ones(nb_categories) / nb_categories\n else:\n sec_matrix = np.array([list(category_weights.values())] * nb_categories)\n\n categories = list(category_weights.keys())\n if overlapping_fun is None:\n # this formula was deduced from the graphs.\n prob_matrix = prob_matrix * (1 - self.magnitude ** 2) + sec_matrix * self.magnitude ** 2\n else:\n overlapping_matrix = np.zeros((len(categories), len(categories)))\n for id1, cat1 in enumerate(categories):\n sum_line = 0\n for id2, cat2 in enumerate(categories):\n elem = overlapping_fun(cat1, cat2)\n sum_line += elem\n overlapping_matrix[id1, id2] = elem\n overlapping_matrix[id1] /= sum_line\n # this formula was also deduced from the graphs.\n prob_matrix = (prob_matrix * (1 - self.magnitude)\n + sec_matrix * self.magnitude ** 3\n + overlapping_matrix * (self.magnitude - self.magnitude ** 3)\n )\n for annotator in continuum.annotators:\n for unit in list(continuum[annotator]):\n continuum.remove(annotator, unit)\n try:\n new_category = np.random.choice(categories, p=prob_matrix[category_weights.index(unit.annotation)])\n except ValueError as e:\n raise e\n 
continuum.add(annotator, Segment(unit.segment.start, unit.segment.end), new_category)\n del unit", "def _u_naught_simple(self):\n # Random is better to give different multipliers in the subgradient phase\n return np.random.rand(self.mrows)*1.", "def define_translocations(genome, num, nc):\n start = []\n end = []\n for n in range(num):\n start_pos = random.randint(100,len(genome.seq)-5100) # positions 100bp from start or end will not be variable\n end_pos = start_pos + random.randint(500,5000)\n start.append(start_pos)\n end.append(end_pos)\n\n if nc: # if non-conservative translocations specified\n del_start = []\n del_end = []\n nc_pos = [p for p in range(0, len(start))]\n for n in range(0,len(start),2): # 50:50 chance that half will be non-conserved\n if not del_start or random.randint(0,1) == 0: # ensures at least 1 will be non-conserved\n length = len(nc_pos)\n pop_pos = random.randint(0,length-1)\n idx = nc_pos.pop(pop_pos)\n nc_size = random.randint(100, ((end[idx]-start[idx])//2)-1) # size between 100 and half the translocation size\n start_pos = end[idx]-nc_size\n end_pos = end[idx]\n del_start.append(start_pos)\n del_end.append(end_pos)\n end[idx] = start_pos\n # add new deletion Variants to genome list\n var = Variant(\"deletion\", start_pos, end_pos, start_pos-end_pos)\n genome.add_variant(var)\n # add new deletions to unavail list\n for j in range(start_pos, end_pos):\n genome.unavail_pos.append(j)\n\n # add translocation Variants to genome list\n for v in range(len(start)):\n pos = get_trans_pos(genome) # get new position\n # add either side of insertion point to unavail list\n genome.unavail_pos.append(pos-1)\n genome.unavail_pos.append(pos)\n genome.unavail_pos.append(pos+1)\n # add translocated region to unavail list\n for j in range(start[v], end[v]):\n genome.unavail_pos.append(j)\n # add Variant to genome's variant list\n var = Variant(\"translocation origin\", start[v], pos, end[v]-start[v])\n genome.add_variant(var)\n var = Variant(\"translocation insert\", pos, start[v], end[v]-start[v])\n genome.add_variant(var)", "def mimic_lecture(mimic_dict, starting_word=None, limit=10):\r\n\r\n if not starting_word:\r\n word = ''\r\n else:\r\n word = starting_word\r\n\r\n line_label = (divmod(x*3, 60) for x in numGen())\r\n text = word\r\n text = str(next(line_label)) + word\r\n line_mark = 0\r\n line_words = 1\r\n while line_mark < limit:\r\n if word not in mimic_dict:\r\n word = \"\"\r\n new_word = random.choice(mimic_dict[word])\r\n\r\n if line_words == 0:\r\n rand_silence = random.randint(1,10)\r\n if rand_silence <= 2:\r\n text = text + \"\\n\" + str(next(line_label))\r\n line_words = 0\r\n line_mark += 1\r\n word = new_word\r\n continue\r\n\r\n text = text + \" \" + new_word\r\n rand = random.randint(3,5)\r\n if line_words >= rand:\r\n text = text + \"\\n\" + str(next(line_label))\r\n line_words = 0\r\n line_mark += 1\r\n else:\r\n line_words += 1\r\n word = new_word\r\n return text", "def sample(self, k):\n result = \"\"\n current = self.gen_beginning()\n for i in range(0, k):\n result += current[0] + \" \"\n t = tuple(current)\n if t in self.dict:\n c_sum = self.dict[t][self.sum_index]\n rand = random.randint(0, c_sum)\n new_term = \"\"\n for term, count in self.dict.iteritems():\n if rand > count:\n rand -= count\n else:\n new_term = term\n break\n current.remove(current[0])\n current.append(new_term)\n else:\n current = self.gen_beginning()\n return result", "def generate_random_DC_crack_coupled_tensor():\n # 1. 
Generate random DC MT and crack MT:\n # generate DC MT:\n DC_MT_to_rot = np.vstack(([0.,0.,1.],[0.,0.,0.], [1.,0.,0.])) # DC moment tensor\n # generate crack MT (from Tape 2013, eq. 41 and Fig. 6):\n # To randomly generate lune_perim_angle (from Tape 2013 eq. 41 and Fig. 6):\n # --- To randomly generate lune_perim_angle (from Tape 2013 eq. 41 and Fig. 6) ---:\n # Get between small range:\n theta_lune_sphere = np.random.uniform(-1.,1.)*np.pi/2.\n random_num = random.random()\n if random_num <= 0.5:\n phi_lune_sphere = 0. #np.pi/6. #0. #np.pi/6.\n elif random_num > 0.5:\n phi_lune_sphere = np.pi/3 #-1.*np.pi/6. #np.pi/3 #-1.*np.pi/6.\n # calculate lune_perim_angle, allowing for outside tan(-pi/2->pi/2):\n lune_perim_angle = np.arctan(np.sin(phi_lune_sphere)/np.sin(theta_lune_sphere)) # Generates uniform distribution of lune crack angle in Lune plot space #random.random()*2.*np.pi # Random number in uniform distribution betwen 0 and 2 pi\n # And redistribute evenly everywhere on boundary:\n random_num = random.random()\n if random_num>0.25 and random_num<=0.5:\n lune_perim_angle = lune_perim_angle+np.pi # Allow to use full 2 pi space\n if random_num>0.5 and random_num<=0.75:\n lune_perim_angle = lune_perim_angle+np.pi/2 # Allow to use full 2 pi space\n if random_num>0.75 and random_num<=1.0:\n lune_perim_angle = lune_perim_angle+3*np.pi/2 # Allow to use full 2 pi space\n # --- ---\n # random_num = random.random()\n # if random_num <= 0.5:\n # theta_lune_sphere = random.random()*np.pi/2.\n # elif random_num > 0.5:\n # theta_lune_sphere = -1.*random.random()*np.pi/2.\n # random_num = random.random()\n # if random_num <= 0.5:\n # phi_lune_sphere = np.pi/6.\n # elif random_num > 0.5:\n # phi_lune_sphere = -1.*np.pi/6.\n # lune_perim_angle = np.arctan(np.sin(phi_lune_sphere)/np.sin(theta_lune_sphere)) # Generates uniform distribution of lune crack angle in Lune plot space #random.random()*2.*np.pi # Random number in uniform distribution betwen 0 and 2 pi\n crack_MT_to_rot = ((((4*(np.sin(lune_perim_angle)**2)) + (np.cos(lune_perim_angle)**2))**-0.5)/np.sqrt(3.)) * np.vstack(([np.cos(lune_perim_angle)-(np.sqrt(2)*np.sin(lune_perim_angle)),0.,0.],[0.,np.cos(lune_perim_angle)-(np.sqrt(2)*np.sin(lune_perim_angle)),0.], [0.,0.,np.cos(lune_perim_angle)+(2.*np.sqrt(2)*np.sin(lune_perim_angle))])) # crack moment tensor\n # 2. Combine DC and crack tensors:\n random_amp_frac = random.random() # random number between 0. and 1., for relative amplitude of DC and crack fractions.\n DC_crack_MT_to_rot =random_amp_frac*DC_MT_to_rot + (1.-random_amp_frac)*crack_MT_to_rot\n # 3. Randomly rotate DC-crack MT to random orientation:\n # Get a random sample 3-vector on a 3-unit sphere to use to calculate random theta and phi rotation angles:\n a_unnormalised = np.array([np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0), np.random.normal(loc=0.0, scale=1.0)], dtype=float) # Generate 3 indepdendent normal deviates\n a_normalised = a_unnormalised/(np.sum(a_unnormalised**2)**-0.5) # Normallise sample onto unit 3-sphere - As in Muller (1959)\n # And normallise so that vector magnitude = 1:\n a_normalised = a_normalised/((np.sum(a_normalised**2))**0.5)\n x = a_normalised[0]\n y = a_normalised[1]\n z = a_normalised[2]\n theta = np.arctan2(np.sqrt((x**2)+(y**2)),z) #np.arccos(z)\n phi = np.arctan2(y,x) #np.arccos(x/np.sin(theta))\n DC_crack_MT_rotated = rot_mt_by_theta_phi(DC_crack_MT_to_rot, theta, phi)\n # 4. 
Normalise and get 6 MT:\n # Get 6 MT:\n DC_crack_six_MT_rotated = get_six_MT_from_full_MT_array(DC_crack_MT_rotated)\n # And normallise so that moment tensor magnitude = 1:\n DC_crack_six_MT_rotated_normalised = DC_crack_six_MT_rotated/((np.sum(DC_crack_six_MT_rotated**2))**0.5)\n # And set to correct dimensions (so matrix multiplication in forward model works correctly):\n DC_crack_six_MT_rotated_normalised = np.reshape(DC_crack_six_MT_rotated_normalised, (6, 1))\n return DC_crack_six_MT_rotated_normalised, random_amp_frac", "def strategiaa(stan_gry):\n ruch = min(random.randint(1,3), stan_gry)\n return ruch", "def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)", "def make_cnv(self):\r\n\r\n self.cnv = random.sample(range(1, 25), 4)\r\n self.c_array = random.sample(self.c_array, len(self.c_array))\r\n score = float(0)\r\n common = [i for i in self.cnv[1:] if i in self.c_array]\r\n if self.cnv[0] in self.c0_array:\r\n score += 0.25\r\n elif self.cnv[0] not in self.c0_array:\r\n score -= 0.25\r\n for i in range(len(common)):\r\n score += 0.25\r\n if score > 1:\r\n score = 1\r\n elif score < 0:\r\n score = 0\r\n return self.cnv.append(score)", "def Generate_Clumped( self, nClump, nAttract ):\n #nClump = 10\n #nAttract = 10\n CLUMP = {}\n for c in range( 1, nClump+1, 1 ):\n CLUMP[c] = (random.uniform( 0, 208.71), random.uniform( 0, 208.71) )\n stands = self.Data.Stand.keys()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n c = int(random.uniform( 1, nClump ))\n b = random.uniform( 0, 360 )\n v = random.uniform( 0, nAttract )\n (vx, vy) = self.Compute_Offset( b, v )\n (x, y) = CLUMP[c]\n if( (x + vx) > 208.71 ): x -= vx\n else: x += vx\n if( (y + vy) > 208.71 ): y -= vy\n else: y += vy\n if( y < 0 ): y *= -1.0\n self.Data.Stand[s].Tree[t].X = x\n self.Data.Stand[s].Tree[t].Y = y", "def cointoss():\n return random.random() < 0.5", "def _generate_random_acoustic(nb_points):\n mean_acoustic = (max_acoustic + min_acoustic) / 2.\n span_acoustic = abs(max_acoustic - mean_acoustic)\n acoustic_data = np.random.rand(nb_points, fmd.DATA_DIMENSION - 1)\n acoustic_data *= span_acoustic\n acoustic_data += mean_acoustic\n return acoustic_data", "def generate_cluster_name():\n ADJECTIVES = (\n \"autumn\", \"hidden\", \"bitter\", \"misty\", \"silent\", \"empty\", \"dry\", \"dark\",\n \"summer\", \"icy\", \"quiet\", \"white\", \"cool\", \"winter\", \"quick\",\n \"patient\", \"twilight\", \"crimson\", \"wispy\", \"weathered\", \"blue\",\n \"broken\", \"cold\", \"damp\", \"falling\", \"frosty\", \"green\",\n \"lingering\", \"bold\", \"little\", \"morning\", \"muddy\", \"old\",\n \"red\", \"rough\", \"still\", \"small\", \"sparkling\", \"tasty\", \"shy\",\n \"wandering\", \"withered\", \"wild\", \"black\", \"mellow\" \"holy\", \"solitary\",\n \"snowy\", \"proud\", \"floral\", \"restless\", \"divine\",\n \"ancient\", \"purple\", \"lively\", \"nameless\", \"tossed\"\n )\n\n ANIMAL_NOUNS = (\n \"alligators\", \"crocodiles\", \"ants\", \"antelopes\", \"badgers\", \"bees\",\n \"buffalos\", \"butterflies\", \"cheetahs\", \"coyotes\", \"dolphins\", \"elephants\",\n \"foxes\", \"giraffes\", \"gorillas\", \"hedgehogs\", \"hornets\", \"hyenas\", \"jackals\",\n \"kangaroos\", \"leopards\", \"lions\", \"lizards\", \"mammoths\", \"porcupines\",\n \"rabbits\", \"racoons\", \"rhinos\", \"sharks\", \"snails\", 
\"snakes\", \"spiders\",\n \"squirrels\", \"tigers\", \"wasps\", \"whales\", \"wolves\", \"wombats\", \"zebras\", \"salad\"\n )\n\n return u\"%s %s\" % (random.choice(ADJECTIVES), random.choice(ANIMAL_NOUNS), )", "def rand_stim(self):\n which = np.random.randint(low=0, high=self.ndata)\n signal = self.data[which]\n excess = signal.shape[0] - self.seg_length\n if excess < 0:\n segment = signal\n else:\n where = np.random.randint(low=0, high=excess)\n segment = signal[where:where+self.seg_length]\n segment /= np.max(np.abs(segment)) # norm by max as in Smith & Lewicki\n return segment", "def generate_annotation_volume(\r\n wf,\r\n entity_meta,\r\n gt_proportion=1.0,\r\n padding=(64, 64, 64),\r\n generate_random_bg_entities=False,\r\n num_before_masking=60,\r\n acwe=False,\r\n stratified_selection=False,\r\n class_proportion={0: 1, 1: 1.0, 2: 1.0, 5: 1},\r\n):\r\n entities = wf.locs\r\n # entities_sel = np.random.choice(\r\n # range(len(entities)), int(gt_proportion * len(entities))\r\n # )\r\n # gt_entities = entities[entities_sel]\r\n\r\n if stratified_selection:\r\n stratified_entities = []\r\n for c in np.unique(entities[:, 3]):\r\n single_class = entities[entities[:, 3] == c]\r\n entities_sel = np.random.choice(\r\n range(len(single_class)), int(class_proportion[c] * len(single_class))\r\n )\r\n stratified_entities.append(single_class[entities_sel])\r\n gt_entities = np.concatenate(stratified_entities)\r\n print(f\"Produced {len(gt_entities)} entities.\")\r\n else:\r\n gt_entities = entities\r\n\r\n if generate_random_bg_entities:\r\n random_entities = generate_random_points_in_volume(wf.vols[0], num_before_masking).astype(\r\n np.uint32\r\n )\r\n from survos2.entity.utils import remove_masked_entities\r\n\r\n print(f\"Before masking random entities generated of shape {random_entities.shape}\")\r\n # random_entities = remove_masked_entities(wf.bg_mask, random_entities)\r\n\r\n # print(f\"After masking: {random_entities.shape}\")\r\n random_entities[:, 3] = np.array([6] * len(random_entities))\r\n # augmented_entities = np.vstack((gt_entities, masked_entities))\r\n # print(f\"Produced augmented entities array of shape {augmented_entities.shape}\")\r\n else:\r\n random_entities = []\r\n\r\n anno_masks, anno_all, gt_entities = make_anno(\r\n wf, gt_entities, entity_meta, gt_proportion, padding, acwe=acwe\r\n )\r\n\r\n return anno_masks, anno_all, gt_entities, random_entities", "def generate_horror_title():\n d666 = random.randint(1, 666)\n if d666 <= 111:\n #the adj noun\n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 111 and d666 <= 222: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 > 222 and d666 < 444: \n #the adj noun of verb \n return \"The \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_verb[random.randint(0, len(horror_verb) - 1)]\n elif d666 >= 444 and d666 < 555: \n #noun of noun\n return horror_noun[random.randint(0, len(horror_noun) - 1)] + \" of \" + horror_noun[random.randint(0, len(horror_noun) - 1)]\n elif d666 >= 555:\n #verb of the adj noun\n return horror_verb[random.randint(0, len(horror_verb) - 1)] + \" of the \" + horror_adj[random.randint(0, len(horror_adj) - 1)] + \" \" + horror_noun[random.randint(0, len(horror_noun) - 1)]", "def test_cosmic_at_beginning_ignored(self):\n 
prng = np.random.RandomState(123459)\n y = prng.normal(size=1000)\n\n y[0] = -14000.\n\n y2 = remove_cosmics(y)\n\n # assert that all the points are unchanged\n np.testing.assert_array_equal(y, y2)", "def test_CTCSegmentation(asr_model: EncoderDecoderASR):\n\n import numpy as np\n from speechbrain.alignment.ctc_segmentation import CTCSegmentation\n from speechbrain.alignment.ctc_segmentation import CTCSegmentationTask\n\n # speech either from the test audio file or random\n # example file included in the speechbrain repository\n # speech = \"./samples/audio_samples/example1.wav\"\n num_samples = 100000\n speech = np.random.randn(num_samples)\n\n # text includes:\n # one blank line\n # kaldi-style utterance names\n # one char not included in char list\n text = (\n \"\\n\"\n \"utt_a THE BIRCH CANOE\\n\"\n \"utt_b SLID ON THE\\n\"\n \"utt_c SMOOTH PLANKS\\n\"\n )\n aligner = CTCSegmentation(\n asr_model=asr_model, kaldi_style_text=True, min_window_size=10,\n )\n segments = aligner(speech, text)\n # check segments\n assert isinstance(segments, CTCSegmentationTask)\n kaldi_text = str(segments)\n first_line = kaldi_text.splitlines()[0]\n assert \"utt_a\" == first_line.split(\" \")[0]\n start, end, score = segments.segments[0]\n assert start > 0.0\n assert end >= start\n assert score < 0.0\n # check options and align with \"classic\" text converter\n option_dict = {\n \"time_stamps\": \"fixed\",\n \"samples_to_frames_ratio\": 512,\n \"min_window_size\": 100,\n \"max_window_size\": 20000,\n \"set_blank\": 0,\n \"scoring_length\": 10,\n \"replace_spaces_with_blanks\": True,\n \"gratis_blank\": True,\n \"kaldi_style_text\": False,\n \"text_converter\": \"classic\",\n }\n aligner.set_config(**option_dict)\n assert aligner.warned_about_misconfiguration\n text = [\n \"THE LITTLE GIRL\",\n \"HAD BEEN ASLEEP\",\n \"BUT SHE HEARD THE RAPS\",\n \"AND OPENED THE DOOR\",\n ]\n segments = aligner(speech, text, name=\"foo\")\n segments_str = str(segments)\n first_line = segments_str.splitlines()[0]\n assert \"foo_0000\" == first_line.split(\" \")[0]\n # test the ratio estimation (result: 509)\n ratio = aligner.estimate_samples_to_frames_ratio()\n assert 400 <= ratio <= 700", "def sample_skills_to_be_covered(self, fraction=1.0):\n self.skills_covered = np.zeros(self.num_skills)\n if fraction < 1.0:\n num_sampled_skills = int(fraction * self.num_skills)\n sampled_skills = np.random.choice(self.num_skills, size=num_sampled_skills, replace=False)\n\n for skill_id in range(self.num_skills):\n if skill_id not in sampled_skills:\n self.skills_covered[skill_id] = 1 # Mark unsampled skills as already covered\n\n self.skills_covered = self.skills_covered.astype(bool)", "def alimony(cps):\n # Head of unit\n mask = cps['tc4_p'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(13. + 1. * rand)\n new_vals = np.where(new_vals < 45000., 45000., new_vals)\n cps.loc[mask, 'alimonyp'] = new_vals\n # spouse of unit\n mask = cps['tc4_s'] > 0\n cps_valid = cps[mask]\n rand = np.random.uniform(size=len(cps_valid))\n new_vals = np.exp(13. + 1. * rand)\n new_vals = np.where(new_vals < 45000., 45000., new_vals)\n cps.loc[mask, 'alimonys'] = new_vals", "def rand_crop_liver(image, label, res_s, out_s,\n apply_data_aug, augment_times=54):\n if image.shape != (res_s, res_s, res_s) or \\\n label.shape != (res_s, res_s, res_s):\n logging.info(\"Unexpected shapes. 
\"\n \"image.shape: %s, label.shape: %s\",\n image.shape, label.shape)\n return\n\n rough_liver_label = 1\n x, y, z = np.where(label == rough_liver_label)\n bbox_center = [(x.min() + x.max()) // 2,\n (y.min() + y.max()) // 2,\n (z.min() + z.max()) // 2]\n\n def in_range_check(c):\n c = max(c, out_s // 2)\n c = min(c, res_s - out_s // 2)\n return c\n\n for _ in range(augment_times):\n rand_c = []\n for c in bbox_center:\n sigma = out_s // 6\n truncate_rad = out_s // 4\n c += np.clip(np.random.randn() * sigma, -truncate_rad, truncate_rad)\n rand_c.append(int(in_range_check(c)))\n\n image_aug = image[rand_c[0] - out_s // 2:rand_c[0] + out_s // 2,\n rand_c[1] - out_s // 2:rand_c[1] + out_s // 2,\n rand_c[2] - out_s // 2:rand_c[2] + out_s // 2].copy()\n label_aug = label[rand_c[0] - out_s // 2:rand_c[0] + out_s // 2,\n rand_c[1] - out_s // 2:rand_c[1] + out_s // 2,\n rand_c[2] - out_s // 2:rand_c[2] + out_s // 2].copy()\n\n if apply_data_aug:\n image_aug = intensity_change(image_aug)\n\n yield image_aug, label_aug", "def aecSpaceRandomTowers():\n origin = aecPoint(0, 0, 0)\n displace = 175\n spacer = aecSpacer()\n shaper = aecShaper()\n \n def full(point, xWidth, yDepth, zHeight, level):\n floor = aecSpace()\n floor.boundary = shaper.makeBox(point, xWidth, yDepth)\n floor.height = zHeight\n floor.level = level\n setColors([floor])\n return [floor]\n \n def halfDepth(point, xWidth, yDepth, zHeight, level):\n depth = yDepth * 0.5\n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, xWidth, depth)\n half1.height = zHeight\n half1.level = level\n halfSpaces = [half1] + spacer.row(half1, xAxis = False)\n setColors(halfSpaces)\n return halfSpaces\n \n def halfWidth(point, xWidth, yDepth, zHeight, level):\n width = xWidth * 0.5\n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, width, yDepth)\n half1.height = zHeight\n half1.level = level\n halfSpaces = [half1] + spacer.row(half1)\n setColors(halfSpaces)\n return halfSpaces\n \n def quarterDepth(point, xWidth, yDepth, zHeight, level):\n if randint(0, 1) == 0:\n depth = yDepth * 0.25\n scale = 3\n else:\n depth = yDepth * 0.75\n scale = 0.333333333 \n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, xWidth, depth)\n half1.height = zHeight\n half1.level = level \n halfSpaces = [half1] + spacer.row(half1, xAxis = False)\n halfSpaces[1].scale(1, scale, 1, halfSpaces[1].points_floor[0])\n setColors(halfSpaces)\n return halfSpaces\n \n def quarterWidth(point, xWidth, yDepth, zHeight, level):\n if randint(0, 1) == 0:\n width = xWidth * 0.25\n scale = 3\n else:\n width = xWidth * 0.75\n scale = 0.333333333 \n half1 = aecSpace() \n half1.boundary = shaper.makeBox(point, width, yDepth)\n half1.height = zHeight\n half1.level = level \n halfSpaces = [half1] + spacer.row(half1)\n halfSpaces[1].scale(scale, 1, 1, halfSpaces[1].points_floor[0])\n setColors(halfSpaces)\n return halfSpaces\n \n def setColors(halfSpaces):\n colors = [aecColor.blue, aecColor.orange, aecColor.purple, aecColor.yellow]\n colorPick = randint(0, 3)\n halfSpaces[0].color = colors[colorPick]\n if len(halfSpaces) == 1: return\n colors.reverse()\n halfSpaces[1].color = colors[colorPick]\n \n def makeFloor(point, xWidth, yDepth, zHeight, level):\n floorType = randint(0, 4)\n if floorType == 0: floorSpaces = full(point, xWidth, yDepth, zHeight, level)\n if floorType == 1: floorSpaces = halfDepth(point, xWidth, yDepth, zHeight, level)\n if floorType == 2: floorSpaces = halfWidth(point, xWidth, yDepth, zHeight, level)\n if floorType == 3: floorSpaces = 
quarterDepth(point, xWidth, yDepth, zHeight, level)\n if floorType == 4: floorSpaces = quarterWidth(point, xWidth, yDepth, zHeight, level)\n return floorSpaces\n \n def makeCore(point, xWidth, yDepth, zHeight): \n xCoord = (point.x - 5) + (xWidth * 0.5)\n yCoord = (point.y + (yDepth * (randint(0, 9) * 0.1)))\n point = aecPoint(xCoord, yCoord, point.z)\n core = aecSpace()\n core.boundary = shaper.makeBox(point, 10, 20)\n core.height = zHeight\n core.color = aecColor.gray\n return [core]\n \n def makeTower(point):\n floors = []\n xWidth = uniform(20, 60)\n yDepth = uniform(20, 60)\n levels = randint(5, 50)\n zHeight = uniform(3, 6)\n plinth = aecSpace()\n plinth.boundary = shaper.makeBox(point, xWidth, yDepth)\n plinthScaleX = (uniform(1, 2.5))\n plinthScaleY = (uniform(1, 2.5))\n plinth.scale(plinthScaleX, plinthScaleY, 2, plinth.centroid_floor)\n plinth.height = (zHeight * 2)\n plinth.color = aecColor.green\n floors.append(plinth)\n floors = floors + makeCore(point, xWidth, yDepth, zHeight * (levels + 3))\n level = (zHeight * 2)\n x = 0\n while x < levels:\n floors = floors + makeFloor(point, xWidth, yDepth, zHeight, level)\n level += zHeight\n x += 1 \n return floors\n \n def makeTowerRow(point, columns, displacement):\n towers = []\n towers = towers + makeTower(point)\n x = 0\n while x < columns:\n point.x += displacement\n towers = towers + makeTower(point)\n x += 1\n return towers\n \n def makeTowerRows(point, displacement, columns, rows):\n towers = []\n x = 0\n while x < rows:\n towers = towers + makeTowerRow(point, columns, displacement)\n point.x = 0\n point.y += displacement\n x += 1\n return towers\n \n return makeTowerRows(origin, displace, 4, 5)", "def insert_bonuses(self):\n segs = random.sample(self.segments, 2)\n\n for s in segs:\n offset = random.randint(-10, 10) / 10.0\n self.add_sprite(s, \"bonus\", offset)", "def runRandomEntryStrat(self):\n start, end = self.randomDays()\n \n gain = (self.df.adj_close[end] - getInfl(self.df.adj_close[start], start.year, end.year)) / \\\n getInfl(self.df.adj_close[start], start.year, end.year)\n #if gain > 6:\n # print \"Windfall: \", start, end, gain\n return gain", "def get_cluttered_translated_mnist(n, canvas_height, canvas_width, crop_height, crop_width):" ]
[ "0.5862996", "0.5669793", "0.5645949", "0.54679686", "0.54564303", "0.53577137", "0.52172846", "0.520848", "0.51407593", "0.5131064", "0.5100705", "0.5077602", "0.506642", "0.50296015", "0.5014514", "0.5014449", "0.4957839", "0.49536094", "0.49509683", "0.49455702", "0.49390426", "0.4917769", "0.4916212", "0.4907479", "0.48983052", "0.4884838", "0.48759818", "0.4863719", "0.4857553", "0.48451674" ]
0.60997105
0
Shuffles the categories of the annotations in the given continuum using the process described in
def category_shuffle(self, continuum: Continuum, overlapping_fun: Callable[[str, str], float] = None, prevalence: bool = False): category_weights = self._reference_continuum.category_weights # matrix "A" nb_categories = len(category_weights) prob_matrix = np.eye(nb_categories) # matrix "B or C" if prevalence: sec_matrix = np.ones(nb_categories) / nb_categories else: sec_matrix = np.array([list(category_weights.values())] * nb_categories) categories = list(category_weights.keys()) if overlapping_fun is None: # this formula was deduced from the graphs. prob_matrix = prob_matrix * (1 - self.magnitude ** 2) + sec_matrix * self.magnitude ** 2 else: overlapping_matrix = np.zeros((len(categories), len(categories))) for id1, cat1 in enumerate(categories): sum_line = 0 for id2, cat2 in enumerate(categories): elem = overlapping_fun(cat1, cat2) sum_line += elem overlapping_matrix[id1, id2] = elem overlapping_matrix[id1] /= sum_line # this formula was also deduced from the graphs. prob_matrix = (prob_matrix * (1 - self.magnitude) + sec_matrix * self.magnitude ** 3 + overlapping_matrix * (self.magnitude - self.magnitude ** 3) ) for annotator in continuum.annotators: for unit in list(continuum[annotator]): continuum.remove(annotator, unit) try: new_category = np.random.choice(categories, p=prob_matrix[category_weights.index(unit.annotation)]) except ValueError as e: raise e continuum.add(annotator, Segment(unit.segment.start, unit.segment.end), new_category) del unit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splits_shuffle(self, continuum: Continuum):\n for _ in range(int(self.magnitude *\n self.SPLIT_FACTOR *\n self._reference_continuum.avg_num_annotations_per_annotator)):\n for annotator in continuum.annotators:\n units = continuum._annotations[annotator]\n to_split = units.pop(numpy.random.randint(0, len(units)))\n security = (to_split.segment.end - to_split.segment.start) * 0.01\n cut = numpy.random.uniform(to_split.segment.start + security, to_split.segment.end)\n\n\n try:\n continuum.add(annotator, Segment(cut, to_split.segment.end), to_split.annotation)\n continuum.add(annotator, Segment(to_split.segment.start, cut), to_split.annotation)\n except ValueError:\n continuum.add(annotator, to_split.segment, to_split.annotation)\n continuum.add(annotator, to_split.segment, to_split.annotation)", "def false_neg_shuffle(self, continuum: Continuum) -> None:\n for annotator in continuum.annotators:\n security = np.random.choice(continuum._annotations[annotator])\n # security : if an annotator doesnt have any annotations gamma cant be computed.\n for unit in list(continuum[annotator]):\n if np.random.random() < self.magnitude:\n continuum.remove(annotator, unit)\n if len(continuum._annotations[annotator]) == 0:\n continuum.add(annotator, security.segment, security.annotation)", "def prep_coco_cats():\n for coco_cat_id, transformed_cat_id_p1 in get_label_map().items():\n transformed_cat_id = transformed_cat_id_p1 - 1\n coco_cats[transformed_cat_id] = coco_cat_id\n coco_cats_inv[coco_cat_id] = transformed_cat_id", "def _load_all(self, anno_file, shuffle):\n image_set_index = []\n labels = []\n coco = COCO(anno_file)\n img_ids = coco.getImgIds()\n #print(img_ids)\n cars=[3,6,8]\n pedestrians=[1]\n cyclists=[2,4]\n lights=[10]\n signs=[13]\n\n apex_categories=cars+pedestrians+cyclists+lights+signs\n cnt=0\n humanonly=0\n human_count=0\n\n for img_id in img_ids:\n relevant=False\n # filename\n image_info = coco.loadImgs(img_id)[0]\n filename = image_info[\"file_name\"]\n #print(filename)\n #subdir = filename.split('_')[1]\n height = image_info[\"height\"]\n width = image_info[\"width\"]\n # label\n anno_ids = coco.getAnnIds(imgIds=img_id)\n annos = coco.loadAnns(anno_ids)\n label = []\n\n #print(\"listing categories for filename: \"+filename)\n\n hashumans=False\n for anno in annos:\n cat_id = int(anno[\"category_id\"])\n if(cat_id in apex_categories):\n cat_reduced= 0 if (cat_id in cars) else 1 if(cat_id in pedestrians) else 2 if(cat_id in cyclists) else 3 if(cat_id in lights) else 4\n bbox = anno[\"bbox\"]\n assert len(bbox) == 4\n xmin = float(bbox[0]) / width\n ymin = float(bbox[1]) / height\n xmax = xmin + float(bbox[2]) / width\n ymax = ymin + float(bbox[3]) / height\n label.append([cat_reduced, xmin, ymin, xmax, ymax, 0])\n #print(\"category: %d\"%cat_reduced)\n if (cat_id in pedestrians):\n hashumans=True\n if(cat_id not in pedestrians): #at least one non-person object is necessary\n relevant=True\n\n if(label and not relevant):\n humanonly+=1\n if label and relevant:\n if(hashumans):\n human_count+=1\n #print(\"adding \"+filename)\n labels.append(np.array(label))\n image_set_index.append(os.path.join(self.set, filename))\n cnt+=1\n print(\"added %d images\"%cnt)\n print(\"%d images has only humans\"%humanonly)\n print(\"%d registered images has humans\"%human_count)\n\n if shuffle:\n import random\n indices = range(len(image_set_index))\n random.shuffle(indices)\n image_set_index = [image_set_index[i] for i in indices]\n labels = [labels[i] for i in indices]\n # store the results\n 
self.image_set_index = image_set_index\n self.labels = labels", "def shift_shuffle(self, continuum: Continuum) -> None:\n shift_max = self.magnitude * self.SHIFT_FACTOR * \\\n self._reference_continuum.avg_length_unit\n for annotator in continuum.annotators:\n for unit in continuum[annotator]:\n continuum.remove(annotator, unit)\n start_seg, end_seg = 0.0, 0.0\n while start_seg >= end_seg:\n start_seg = unit.segment.start + np.random.uniform(-1, 1) * shift_max\n end_seg = unit.segment.end + np.random.uniform(-1, 1) * shift_max\n continuum.add(annotator, Segment(start_seg, end_seg), unit.annotation)", "def false_pos_shuffle(self, continuum: Continuum) -> None:\n ref_units = self._reference_continuum[self._reference_annotator]\n avg_dur = np.average([unit.segment.end - unit.segment.start for unit in ref_units])\n var_dur = np.std([unit.segment.end - unit.segment.start for unit in ref_units])\n category_weights = self._reference_continuum.category_weights\n bounds_inf, bounds_sup = self._reference_continuum.bound_inf, self._reference_continuum.bound_sup\n for annotator in continuum.annotators:\n for _ in range(int(self.magnitude * self.FALSE_POS_FACTOR * len(self._reference_continuum))):\n # a random unit is generated from a (all random) central point, duration, and category\n category = np.random.choice(category_weights.keys(), p=category_weights.values())\n center = np.random.uniform(bounds_inf, bounds_sup)\n duration = abs(np.random.normal(avg_dur, var_dur))\n continuum.add(annotator,\n Segment(center - duration / 2, center + duration / 2),\n annotation=category)", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 4:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])", "def predict_category(self):\n pass", "def categorize_classifier_files(out_dir):\n\n #sort all of the classifier files into a dictionary\n class_files = glob.glob(\"feature_extraction_m*\")\n class_file_dict = {\"positive\":[], \"negative\":[]}\n class_cand_dict = {\"m1\":class_file_dict, \"m2\":class_file_dict, \"m3\":class_file_dict, \"m4\":class_file_dict, \"m5\":class_file_dict}\n\n for filename in class_files:\n split_name = filename.split(\"_\")[-1].split(\".\")\n model_num = split_name[0]\n det = split_name[-1]\n class_cand_dict[model_num][det].append(filename)\n\n #get all of the pfd files into a list\n class_file_m1 = glob.glob(\"feature_extraction_m1*\")\n pfd_files = []\n for afile in class_file_m1:\n f = open(afile, \"r\")\n for line in f.readlines():\n pfd_files.append(line)\n 
f.close()\n\n #fill a dictionary with pfds and a value for how many positive IDs each pfd has\n pulsar_pfds={}\n for key in pfd_files:\n pulsar_pfds[key]=0\n for model_num in class_cand_dict.keys():\n if class_cand_dict[model_num][\"positive\"]:\n print(class_cand_dict[model_num][\"positive\"])\n f = open(class_cand_dict[model_num][\"positive\"][0], \"r\")\n for line in f.readlines():\n pulsar_pfds[line]+=1\n f.close()\n\n #For each pfd with >=3 positive IDs, write that pfd to 'positive' file, else write to 'negative' file\n pos_f = open(os.path.join(out_dir, \"LOTAAS_positive_detections.txt\"), \"w+\")\n neg_f = open(os.path.join(out_dir, \"LOTAAS_negative_detections.txt\"), \"w+\")\n for pfd_key in pulsar_pfds.keys():\n if pulsar_pfds[pfd_key]>=3:\n print(\"detected pulsar: {}\".format(pfd_key))\n pos_f.write(pfd_key.split(\"/\")[-1])\n else:\n neg_f.write(pfd_key.split(\"/\")[-1])\n pos_f.close()\n neg_f.close()", "def test_compare_categories_categorical_variables(self):\r\n for method in self.cat_methods:\r\n compare_categories(self.dm1_fp, self.map1_fp, method,\r\n self.cat_categories, self.num_perms, self.test_dir)\r\n results_fp = join(self.test_dir, '%s_results.txt' % method)\r\n self.files_to_remove.append(results_fp)\r\n results_f = open(results_fp, 'U')\r\n results = results_f.readlines()\r\n results_f.close()\r\n\r\n # Make sure the files aren't empty.\r\n self.assertTrue(len(results) > 0)", "def categorize_recipe(data):\n percent_lines = []\n for line in data:\n\n tipo = analyze_line(line)", "def fit_transform(*args: str) -> List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n\n seen_categories = dict()\n transformed_rows = []\n\n for cat in categories:\n bin_view_cat = (int(b) for b in\n bin_format.format(1 << len(seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n\n return transformed_rows", "def fit_transform(*args: str) -> List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n\n seen_categories = dict()\n transformed_rows = []\n\n for cat in categories:\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n\n return transformed_rows", "def fit_transform(*args) -> List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n\n seen_categories = dict()\n transformed_rows = []\n\n for cat in categories:\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n\n return transformed_rows", "def preprocessing_labels1(y,c = 1.,m = 0.6, f = 0.2 ,dataset = 'mnist'):\n perm_mnist = [3,5,8,6,0,4,7,9,2,1]\n perm_fmnist = [0,2,6,3,4,5,7,9,1,8]\n perm = [0,1,2,3,4,5,6,7,8,9]\n perm_cifar10 = [0,8,1,9,2,6,3,5,4,7]\n n = y.shape[0]\n 
y_res1 = np.zeros((int(c*n),2))\n print(int(c*n))\n y_res3 = np.zeros((int(f*n),10))\n if dataset == 'cifar10':\n perm = perm_cifar10\n elif dataset == 'mnist':\n perm = perm_mnist\n elif dataset == 'fashion_mnist':\n perm = perm_fmnist\n if dataset == 'cifar10':\n y_res2= np.zeros((int(m*n),5))\n for i in range(n):\n if i< int(c*n):\n if np.argmax(y[i]) in [0,1,8,9]:\n y_res1[i,0] = 1\n else :\n y_res1[i,1] = 1\n if i<int(m*n):\n if np.argmax(y[i]) in [0,8]:\n y_res2[i,0] = 1\n elif np.argmax(y[i]) in [1,9]:\n y_res2[i,1] = 1\n elif np.argmax(y[i]) in [2,6]:\n y_res2[i,2] = 1\n elif np.argmax(y[i]) in [3,5]:\n y_res2[i,3] = 1\n elif np.argmax(y[i]) in [4,7]:\n y_res2[i,4] = 1\n if i<int(f*n):\n y_res3[i,np.argmax(y[i])] = 1\n return(y_res1,y_res2,y_res3)\n else :\n y_res2= np.zeros((int(m*n),4))\n for i in range(n):\n if i< int(c*n):\n if np.argmax(y[i]) in perm[0:5]:\n y_res1[i,0] = 1\n else :\n y_res1[i,1] = 1\n if i<int(m*n):\n if np.argmax(y[i]) in perm[0:3]:\n y_res2[i,0] = 1\n elif np.argmax(y[i]) in perm[3:5]:\n y_res2[i,1] = 1\n elif np.argmax(y[i]) in perm[5:8]:\n y_res2[i,2] = 1\n elif np.argmax(y[i]) in perm[8:]:\n y_res2[i,3] = 1\n if i<int(f*n):\n y_res3[i,np.argmax(y[i])] = 1\n return(y_res1,y_res2,y_res3)", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def categorize_attributes():\n global attr_categories, seeds\n print \"Generating seeds...\"\n seeds = get_seeds()\n\n print \"Categorizing attributes...\"\n categorized = categorize(seeds)\n \n category_distances = {}\n attr_categories = {}\n for c in categorized:\n for (attr, score) in categorized[c]:\n attr_categories[attr] = c\n category_distances[attr] = score", "def concept_categorization(self):\n dataset = pd.read_csv(\"data/Categorization 
data set.csv\", sep=\";\", header=None)\n dataset.columns = ['concept','word']\n\n cti = {}\n for i,c in enumerate(np.unique(dataset.concept.values)):\n cti[c] = i\n y_true = dataset.concept.apply(lambda x: cti[x]).values\n vs = []\n preds = [''] * dataset.shape[0]\n for ind,w in enumerate(dataset.word.values):\n try:\n vs.append(self.embeddings_index[w])\n except:\n preds[ind] = 0 \n km = KMeans(n_clusters=22, random_state=0)\n km.fit(np.array(vs).astype(np.float32))\n for ind,w in enumerate(dataset.word.values):\n if preds[ind] == '':\n preds[ind] = km.predict(np.array([self.embeddings_index[w]]))[0]\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, preds)\n #purity score\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)", "def manual_preprocess(self,config, folderLocation):\n with open(config) as f:\n config_data= yaml.load(f,Loader=FullLoader) \n\n df = pd.read_csv(config_data[\"raw_data_address\"])\n df.dropna(how='all', axis=1, inplace=True)\n\n label_list = []\n if config_data[\"is_auto_preprocess\"] == False:\n\n if config_data['drop_column_name'] != []:\n del config_data['drop_column_name'][0]\n for column in config_data['drop_column_name']:\n if column != config_data['target_column_name']:\n df=df.drop(column, axis = 1)\n else:\n del config_data['drop_column_name'][0]\n\n\n if config_data['imputation_column_name'] != []:\n del config_data['imputation_column_name'][0]\n del config_data['impution_type'][0]\n strategy_values_list = []\n\n for index, column in enumerate(config_data[\"imputation_column_name\"]):\n if df[column].dtype == object:\n impution_type = \"most_frequent\"\n config_data[\"impution_type\"][index] = \"most_frequent\"\n else:\n impution_type = config_data[\"impution_type\"][index] \n\n if impution_type == \"mean\":\n df_value = df[[column]].values\n imputer = SimpleImputer(missing_values = np.nan, strategy = \"mean\")\n strategy_values_list.append(df[column].mean())\n df[[column]] = imputer.fit_transform(df_value)\n\n elif impution_type == \"median\":\n df_value = df[[column]].values\n imputer = SimpleImputer(missing_values = np.nan, strategy = \"median\")\n strategy_values_list.append(df[column].median())\n df[[column]] = imputer.fit_transform(df_value)\n\n elif impution_type == \"most_frequent\":\n df.fillna(df.select_dtypes(include='object').mode().iloc[0], inplace=True)\n strategy_values_list.append(df[column].value_counts().idxmax())\n\n elif impution_type=='knn':\n df_value = df[[column]].values\n imputer = KNNImputer(n_neighbors = 4, weights = \"uniform\",missing_values = np.nan)\n strategy_values_list.append(0)\n df[[column]] = imputer.fit_transform(df_value)\n\n if strategy_values_list != []:\n config_data['mean_median_mode_values'] = list(map(str, strategy_values_list)) \n \n\n if config_data['scaling_column_name'] != []:\n del config_data['scaling_column_name'][0]\n del config_data['scaling_type'][0]\n scaled_value_list = []\n for index, column in enumerate(config_data[\"scaling_column_name\"]):\n if df[column].dtype == object or config_data[\"target_column_name\"] == column:\n del config_data['scaling_column_name'][index]\n del config_data['scaling_type'][index]\n pass\n else:\n scaling_type = config_data[\"scaling_type\"][index]\n config_data['scaling_values'] = {}\n df_value = df[[column]].values\n df_std = (df_value - df_value.min(axis=0)) / (df_value.max(axis=0) - df_value.min(axis=0))\n\n if scaling_type == \"normalization\":\n scaled_value = df_std\n 
scaled_value_list.append({\"min\":float(df_value.min(axis=0)),\"max\":float(df_value.max(axis=0))})\n\n elif scaling_type == 'standarization':\n scaled_value = (df_value - df_value.mean()) / df_std \n scaled_value_list.append({\"min\":float(df_value.min(axis=0)),\"max\":float(df_value.max(axis=0)),\"mean\":float(df_value.mean())})\n\n config_data['scaling_values'] = scaled_value_list\n df[[column]] = scaled_value\n\n\n if config_data['encode_column_name'][0] != []:\n del config_data['encode_column_name'][0]\n del config_data['encoding_type'][0]\n \n\n for index, column in enumerate(config_data[\"encode_column_name\"]):\n encoding_type = config_data[\"encoding_type\"][index]\n\n if(df[column].dtype == 'object') and (df[column].nunique() > 30) and (config_data[\"target_column_name\"] != column):\n df.drop(column, axis = 1,inplace=True)\n del config_data['encode_column_name'][index]\n del config_data['encoding_type'][index]\n\n elif config_data[\"target_column_name\"] == column and df[column].dtype == 'object':\n config_data[\"encoding_type\"][index] = \"Label Encoding\"\n encoding_type == \"Label Encoding\"\n\n elif config_data[\"target_column_name\"] == column and df[column].dtype != 'object':\n del config_data['encode_column_name'][index]\n del config_data['encoding_type'][index]\n pass\n\n elif df[column].dtype != 'object'and df[column].nunique() > 30:\n del config_data['encode_column_name'][0]\n del config_data['encoding_type'][0]\n pass\n\n elif encoding_type == \"Label Encoding\":\n df[column].astype(str)\n encoder = LabelEncoder()\n df[column] = encoder.fit_transform(df[column])\n key = list(map(str,encoder.classes_.tolist()))\n label_list.append({column : dict(zip(key, range(len(key))))})\n config_data['labels'] = label_list\n\n elif encoding_type == \"One-Hot Encoding\":\n encoder = OneHotEncoder(sparse=False)\n df_encoded = pd.DataFrame (encoder.fit_transform(df[[column]]))\n df_encoded.columns = encoder.get_feature_names([column])\n df.drop([column] ,axis=1, inplace=True)\n df= pd.concat([df, df_encoded ], axis=1)\n \n\n ### Default\n df.fillna(df.dtypes.replace({'float64': 0.0, 'O': 'NULL'}), downcast='infer', inplace=True)\n for column in df.columns:\n if df[column].dtype == 'object'and df[column].nunique() > 30 and config_data[\"target_column_name\"] != column:\n df.drop(column, axis = 1,inplace=True)\n config_data['drop_column_name'].extend([column])\n\n if df[config_data[\"target_column_name\"]].dtype == 'object':\n column=config_data[\"target_column_name\"]\n df[column].astype(str)\n encoder = LabelEncoder()\n df[column] = encoder.fit_transform(df[column])\n key = list(map(str,encoder.classes_.tolist()))\n label_list.append({column : dict(zip(key, range(len(key))))})\n config_data['labels'] = label_list\n\n object_type_column_list = []\n for column in df.columns:\n if df[column].dtype == 'object':\n object_type_column_list.append(column)\n config_data['encode_column_name'].extend([column])\n config_data['encoding_type'].extend(['One-Hot Encoding'])\n\n if object_type_column_list != []:\n for column in object_type_column_list:\n encoder = OneHotEncoder(sparse=False)\n df_encoded = pd.DataFrame (encoder.fit_transform(df[[column]]))\n df_encoded.columns = encoder.get_feature_names([column])\n df.drop([column] ,axis=1, inplace=True)\n df= pd.concat([df, df_encoded ], axis=1)\n \n \n\n # if config_data[\"Remove_outlier\"] == True:\n # z = np.abs(stats.zscore(df))\n # df = df[(z < 3).all(axis=1)]\n \n\n # if config_data[\"feature_selection\"] == True:\n # col_corr = set()\n # 
corr_matrix = df.corr()\n # for i in range(len(corr_matrix.columns)):\n # for j in range(i):\n # if abs(corr_matrix.iloc[i, j]) > 0.90:\n # col_corr.add(corr_matrix.columns[i])\n # df = df.drop(col_corr,axis=1)\n # config_data['corr_col'] = list(col_corr)\n \n config_data['final_columns']=list(df.columns)\n \n df.to_csv('clean_data.csv')\n shutil.move(\"clean_data.csv\",folderLocation)\n clean_data_address = os.path.abspath(os.path.join(folderLocation,\"clean_data.csv\"))\n config_data['clean_data_address'] = clean_data_address\n\n with open(config, 'w') as yaml_file:\n yaml_file.write( yaml.dump(config_data, default_flow_style=False))\n \n return clean_data_address", "def _preprocess(self, X: np.ndarray) -> np.ndarray:\n categories_array = np.zeros((X.shape[0], self.n_categories))\n categories_idx = 0\n for idx in range(len(self.types)):\n if self.types[idx] == 0:\n continue\n else:\n for j in range(self.types[idx]):\n mask = X[:, idx] == j\n categories_array[mask, categories_idx] = 1\n categories_idx += 1\n numerical_array = X[:, ~self.categorical_mask]\n X = np.concatenate((numerical_array, categories_array), axis=1)\n X[np.isnan(X)] = -1.0\n return X", "def tokenize(self):\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1", "def __init__(self, root, which_set, vocab, transform=None):\n self.root = root\n self.img_root = os.path.join(root, 'Img')\n self.ann = json.load(open(os.path.join(root, 
'{}_labels.json'.format(which_set)),'r'))\n\n self.vocab = vocab\n self.transform = transform\n self.img_list = list(self.ann.keys())\n # transfer categories id to labels\n self.cat2label = {}\n for i, k in enumerate(label_corpus):\n self.cat2label[k] = i\n\n self.num_cats = len(self.cat2label) \n\n # vgnome has varied number of annotations [1, 20], average 5.73\n # we still choose five as the parameter. It can be adjusted later on\n self.num_ann_onebatch = 5\n self.ids = [a for a in range(len(self.ann))]\n\n print('\\t {} train samples from {} set'.format(len(self.ids), which_set ))\n print('\\t {} of categories'.format(self.num_cats))", "def test_categorical_feature():\n\n feature = Categorical(\"abc\")\n\n for element in \"abc\":\n feature.set(element)\n feature.set(\"ignore this\")\n feature.push()\n\n for element in \"abc\":\n getattr(feature, \"set_\" + element)()\n feature.push()\n\n array = feature.array()\n assert array.shape == (6, 3)\n for i, row in enumerate(array):\n assert sum(row) == 1.0 and row[i % 3] == 1.0", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def plotCifar():\n classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n num_classes = len(classes)\n samples_per_class = 7\n for y, cls in enumerate(classes):\n #print(type(y)) #<class 'int'>\n #print(y) # 0 to 9 - 10 Classes \n idxs = np.flatnonzero(y_train == y) ##FOO_BAR_TBD--\n #print(type(idxs)) # <class 'numpy.ndarray'> \n #Output array, containing the indices of the elements of a.ravel() that are non-zero.\n #print(idxs) #[ 29 30 35 ... 
49941 49992 49994]\n idxs = np.random.choice(idxs, samples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt_idx = i * num_classes + y + 1\n plt.subplot(samples_per_class, num_classes, plt_idx)\n plt.imshow(X_train[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls)\n plt.show()", "def preprocess_categories(\n\tbusiness,\n\texclude_words=[\"Food\", \"Restaurants\"]\n\t):\n\tline = business[CATEGORIES].split(\", \")\n\tcate_list = [x for x in line if x not in exclude_words]\n\tbusiness[CATEGORIES] = cate_list", "def load_classification_dataset(step, do_lower_case,data_type,data_subtype,use_syntetic_data):\n assert step in ['train', 'test']\n binary = False \n undersample_majority = False\n\n paths = ['~/Github/Data/Patient/NIRADS/PET_CT_NIRADS.xlsx', '~/Github/Data/Patient/NIRADS/MR_NIRADS_2018.xlsx','~/Github/Data/Patient/NIRADS/MR_NIRADS.xlsx']\n if data_type == 'ct':\n data_r = pd.read_excel(paths[0])\n else:\n data_r = pd.read_excel(paths[1])\n data_r.append(pd.read_excel(paths[2]), ignore_index = True, sort=False)\n\n data_p,data_n, y_p, y_n = tc.text_cleaning(data_r, None, data_target='section') \n\n if data_subtype == 'primary':\n data = data_p\n y = y_p -1\n else:\n data = data_n\n y = y_n -1\n\n if binary:\n y[y<2]=0\n y[y>0]=1\n\n y_dist = [np.sum(y==x) for x in np.unique(y)]\n print(\"Distribution of all labels: \", y_dist, \"\\n\\n\")\n\n train_text, test_text, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=1)\n\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels: \", y_dist, \"\\n\\n\")\n\n if step =='train':\n if use_syntetic_data:\n data_syntetic = pd.read_csv('~/Github/Data/Patient/NIRADS/PET_CT_NIRADS_syntetic.csv')\n train_text = np.concatenate((train_text,data_syntetic['syntetic_data'].values))\n y_train = np.concatenate((y_train,data_syntetic['syntetic_label'].values-1))\n\n train_text, test_text, y_train, y_test = train_test_split(train_text, y_train, test_size=0.5, random_state=1)\n train_text = np.concatenate((train_text,test_text))\n y_train = np.concatenate((y_train,y_test))\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels after inserting syntetic data: \", y_dist, \"\\n\\n\")\n\n if not undersample_majority:\n data_to_use = train_text.copy()\n y_to_use = y_train.copy()\n else:\n max_label1 = 1000\n data_to_use = []\n y_to_use = []\n y1=0\n for x in range(len(y_train)):\n if y_train[x] !=1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n else:\n if y1 <max_label1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n y1+=1\n\n else:\n data_to_use = test_text.copy()\n y_to_use = y_test.copy()\n\n basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n examples = []\n\n for i, tokens in tqdm(enumerate(data_to_use)):\n label = y_to_use[i]\n examples.append(\n ClassificationExample(\n id=i,\n tokens_a=basic_tokenizer.tokenize(tokens),\n tokens_b=None,\n label=label,\n )\n )\n logging.info('Number of `%s` examples: %d', step, len(examples))\n \n return examples", "def train_cats(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()", "def train_cats(df):\n for n,c in df.items():\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()" ]
[ "0.66190857", "0.60596675", "0.5748635", "0.5743178", "0.56422764", "0.54873806", "0.5344997", "0.5321224", "0.51596314", "0.5148415", "0.5089022", "0.5050082", "0.5033594", "0.50281596", "0.5023817", "0.500936", "0.49773428", "0.49739432", "0.4970692", "0.49623668", "0.49614674", "0.49596122", "0.49398482", "0.49378845", "0.4923401", "0.49125096", "0.49033433", "0.49028298", "0.48904288", "0.48904288" ]
0.6433183
1
Tweak the continuum by randomly splitting segments. The number of splits per annotator is constant and proportional to the magnitude of the CST and the number of units in the reference. A split segment can be split again.
def splits_shuffle(self, continuum: Continuum): for _ in range(int(self.magnitude * self.SPLIT_FACTOR * self._reference_continuum.avg_num_annotations_per_annotator)): for annotator in continuum.annotators: units = continuum._annotations[annotator] to_split = units.pop(numpy.random.randint(0, len(units))) security = (to_split.segment.end - to_split.segment.start) * 0.01 cut = numpy.random.uniform(to_split.segment.start + security, to_split.segment.end) try: continuum.add(annotator, Segment(cut, to_split.segment.end), to_split.annotation) continuum.add(annotator, Segment(to_split.segment.start, cut), to_split.annotation) except ValueError: continuum.add(annotator, to_split.segment, to_split.annotation) continuum.add(annotator, to_split.segment, to_split.annotation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def shift_shuffle(self, continuum: Continuum) -> None:\n shift_max = self.magnitude * self.SHIFT_FACTOR * \\\n self._reference_continuum.avg_length_unit\n for annotator in continuum.annotators:\n for unit in continuum[annotator]:\n continuum.remove(annotator, unit)\n start_seg, end_seg = 0.0, 0.0\n while start_seg >= end_seg:\n start_seg = unit.segment.start + np.random.uniform(-1, 1) * shift_max\n end_seg = unit.segment.end + np.random.uniform(-1, 1) * shift_max\n continuum.add(annotator, Segment(start_seg, end_seg), unit.annotation)", "def prepareParrallelize(self,segs):\n\n angles = numpy.array([s.angle for s in segs ])\n angles[numpy.where(angles<0)] += _pi # we care about direction, not angle orientation\n clList = clusterValues(angles, 0.15, refScaleAbs='abs')\n\n for cl in clList:\n meanA = angles[list(cl)].mean()\n for i in cl:\n seg = segs[i]\n seg.newAngle = meanA if seg.angle>=0. else meanA-_pi", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def setSplit(tmr_channel, total, newSecondPart):\n writeTMR(tmr_channel, TMR_CMPLD1, total-newSecondPart)\n writeTMR(tmr_channel, TMR_CMPLD2, newSecondPart)", "def split():\n flag = 0\n for chromosome in region:\n for inf in region[chromosome]:\n if flag == 0:\n if chromosome not in test_set:\n test_set[chromosome] = [inf]\n else:\n test_set[chromosome].append(inf)\n else:\n if chromosome not in train_set:\n train_set[chromosome] = [inf]\n else:\n train_set[chromosome].append(inf)\n\n flag += 1\n flag %= 10", "def split_ctm(file_name, segments, num_lines, dst_dir, abbrev_segment_names):\n with open(file_name, 'r') as file_in:\n counter = 0\n for segment, num_line in zip(segments, num_lines):\n if abbrev_segment_names:\n file_name = abbreviate_segment(segment)\n else:\n file_name = segment\n dst_file = os.path.join(dst_dir, file_name + '.npz')\n time = []\n duration = []\n word = []\n for _ in range(num_line):\n line = next(file_in).split()\n counter += 1\n if not line[0].startswith(';;') and len(line) >= LINE_LEN:\n assert line[0] == segment, \"Mismatch between {} and {}\".format(line[0], segment)\n time.append(float(line[START_IDX]))\n duration.append(float(line[DUR_IDX]))\n word.append(line[WORD_IDX])\n np.savez(dst_file, time=time, duration=duration, word=word)", "def train_data_split(self, selected_sr, selected_ss):\n \"\"\"\n Arguments:\n selected_sr: ordinal number of the selected split ratio\n selected_ss: ordinal number of split shift\n \"\"\"\n assert selected_sr < len(self.split_ratios),\\\n \"The total number of possible split ratios is: %d\"\\\n % len(self.split_ratios)\n\n max_shifts = 100 / self.split_ratios[selected_sr][-1]\n\n assert selected_ss < max_shifts,\\\n \"The total number of split shifts is: %d\" % max_shifts\n\n self.empty_split()\n\n n = float(self.n_train) / max_shifts\n self.n_develop = int(self.split_ratios[selected_sr][0] /\n (100 / max_shifts) * n)\n\n self.n_valid = int(self.split_ratios[selected_sr][1] /\n (100 / max_shifts) * n)\n\n self.n_eval = self.n_train - self.n_develop - self.n_valid\n\n for i in range(self.n_develop):\n self.development_subjects.\\\n append(self.training_subjects[(selected_ss * self.n_eval + i) %\n self.n_train])\n\n for i in range(self.n_valid):\n self.validation_subjects.\\\n 
append(self.training_subjects[(selected_ss * self.n_eval +\n self.n_develop + i) %\n self.n_train])\n\n for i in range(self.n_eval):\n self.evaluation_subjects.\\\n append(self.training_subjects[(selected_ss * self.n_eval +\n self.n_develop +\n self.n_valid + i) %\n self.n_train])", "def __init__(self):\n super().__init__()\n self._points = 0\n self._segments = []\n self.fill_list()\n # i = random.randint(0, len(self._segments) - 1)\n # self.set_text(self._segments[i])\n self.reset()", "def reformatCoronalView4NeedleSegment(self, base, tip, ID=-1):\r\n #research\r\n profprint()\r\n for i in range(2): # workaround update problem\r\n if ID >=0:\r\n modelNode = slicer.util.getNode('vtkMRMLModelNode' + str(ID))\r\n polyData = modelNode.GetPolyData()\r\n nb = polyData.GetNumberOfPoints()\r\n base = [0, 0, 0]\r\n tip = [0, 0, 0]\r\n polyData.GetPoint(nb - 1, tip)\r\n polyData.GetPoint(0, base)\r\n a, b, c = tip[0] - base[0], tip[1] - base[1], tip[2] - base[2]\r\n \r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(1)\r\n reformatLogic.SetSliceNormal(sGreen, 1, -a / b, 0)\r\n #reformatLogic.SetSliceOrigin(sGreen, base[0],base[1],base[2])#crashes\r\n m = sGreen.GetSliceToRAS()\r\n m.SetElement(0, 3, base[0])\r\n m.SetElement(1, 3, base[1])\r\n m.SetElement(2, 3, base[2])\r\n sGreen.Modified()", "def _trainBySegments(self, divisions, trainingSet):\n # subdivide domain and train subdomain ROMs, as with the segmentation\n ## TODO can we increase the inheritance more here, or is this the minimum cutset?\n counter, remainder = divisions\n # store delimiters\n if len(remainder):\n self.raiseADebug('\"{}\" division(s) are being excluded from clustering consideration.'.format(len(remainder)))\n ## train ROMs for each segment\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # collect ROM features (basic stats, etc)\n clusterFeatures = self._gatherClusterFeatures(roms, counter, trainingSet)\n # future: requested metrics\n ## TODO someday\n # store clustering info, unweighted\n self._clusterInfo['features'] = {'unscaled': copy.deepcopy(clusterFeatures)}\n # weight and scale data\n ## create hierarchy for cluster params\n features = sorted(clusterFeatures.keys())\n hierarchFeatures = defaultdict(list)\n for feature in features:\n _, metric, ident = feature.split('|', 2)\n # the same identifier might show up for multiple targets\n if ident not in hierarchFeatures[metric]:\n hierarchFeatures[metric].append(ident)\n ## weighting strategy, TODO make optional for the user\n weightingStrategy = 'uniform'\n clusterFeatures = self._weightAndScaleClusters(features, hierarchFeatures, clusterFeatures, weightingStrategy)\n self._clusterInfo['features']['scaled'] = copy.deepcopy(clusterFeatures)\n # perform clustering\n labels = self._classifyROMs(self._divisionClassifier, features, clusterFeatures)\n uniqueLabels = sorted(list(set(labels))) # note: keep these ordered! Many things hinge on this.\n self.raiseAMessage('Identified {} clusters while training clustered ROM \"{}\".'.format(len(uniqueLabels), self._romName))\n # if there were some segments that won't compare well (e.g. 
leftovers), handle those separately\n if len(remainder):\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n else:\n unclusteredROMs = []\n # make cluster information dict\n self._clusterInfo['labels'] = labels\n ## clustered\n self._clusterInfo['map'] = dict((label, roms[labels == label]) for label in uniqueLabels)\n ## unclustered\n self._clusterInfo['map']['unclustered'] = unclusteredROMs\n # TODO what about the unclustered ones? We throw them out in truncated representation, of necessity.\n self._roms = list(self._clusterInfo['map'][label][0] for label in uniqueLabels)", "def chimera_removal(amplicon_file, minseqlen, mincount, chunk_size, kmer_size):\n # Sequence\n sequences = []\n occ = []\n for de_rep in dereplication_fulllength(amplicon_file, minseqlen, mincount):\n sequences.append(de_rep[0])\n occ.append(de_rep[1])\n\n # Séparation en segment de taille chunk_size + génération du dictionnaire de kmer\n segments, kmer_dico = [], {}\n for i in range(len(sequences)):\n segments.append(get_chunks(sequences[i], chunk_size))\n kmer_dico = get_unique_kmer(kmer_dico, sequences[i], i, kmer_size)\n\n # Génération des best_mates pour un segment donné\n best_mates = []\n for sequence_chunks in segments:\n for each_chunk in sequence_chunks:\n best_mates.append(search_mates(kmer_dico, each_chunk, kmer_size))\n\n # Recherche de séquences parentes - séquences présentes dans toutes les listes\n seq_parentes = common(best_mates[0], best_mates[1])\n\n # Déterminer si une séquence est une chimère\n chimera_id = []\n chunk_seq_list = [get_chunks(sequences[seq_parentes[0]], chunk_size)]\n chunk_seq_list += [get_chunks(sequences[seq_parentes[1]], chunk_size)]\n for i in range(len(sequences)):\n if not i in seq_parentes:\n chunk_chim = get_chunks(sequences[i], chunk_size)\n\n perc_identity_matrix = [[] for c in range(len(chunk_chim))]\n for j in range(len(chunk_seq_list)):\n for l,chunk in enumerate(chunk_chim):\n perc_identity_matrix[l].append(\n get_identity(nw.global_align(chunk, chunk_seq_list[j][l], gap_open=-1, gap_extend=-1, matrix=os.path.abspath(os.path.join(os.path.dirname(__file__), '../agc')) + \"/MATCH\")))\n\n if detect_chimera(perc_identity_matrix):\n chimera_id.append(i)\n\n\n for i in range(len(sequences)):\n if not i in chimera_id:\n yield [sequences[i], occ[i]]", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for 
isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def test_single_segment(self):\n\n recombination = MagicMock()\n recombination.bases = np.array([5, 10, 15, 20], dtype = np.uint32)\n recombination.cm = np.array([0.00000, 0.001, 0.00125, 0.00215])\n recombination.rates = np.array([0.0002000000, 0.0000500000,\n 0.0001800000, 0.0002000000])\n\n starts = [0]\n stops = [5]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.000], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [5]\n stops = [10]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.001], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [10]\n stops = [15]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.00025], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [15]\n stops = [20]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.0009], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [11]\n stops = [14]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.00015], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [11]\n stops = [19]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.0009199999999999999], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [11]\n stops = [18]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.0007399999999999999], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)\n\n starts = [0]\n stops = [20]\n lengths = cm_lengths(starts, stops, recombination)\n expect = np.array([0.00215], dtype = np.float64)\n np.testing.assert_almost_equal(lengths, expect)", "def _trainBySegments(self, divisions, trainingSet):\n # train the subdomain ROMs\n counter, remainder = divisions\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # if there were leftover domain segments that didn't go with the rest, train those now\n if remainder:\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n roms = np.hstack([roms, unclusteredROMs])\n self._roms = roms", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def make_sections(self, split_num=1000):\n self.obstacles.add(self.chairlift.pylons.sprites())\n num = max(1, 
int(len(self.obstacles) / split_num))\n section_length = int(self.map_size[1] / num)\n self.sections = {}\n for y in range(0, self.map_size[1], section_length):\n rect_info = (0, y, self.map_size[0], section_length)\n rect = pg.Rect(rect_info)\n self.sections[rect_info] = pg.sprite.Group([x for x in self.obstacles if rect.collidepoint(x.rect.midbottom)])", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def insert_bonuses(self):\n segs = random.sample(self.segments, 2)\n\n for s in segs:\n offset = random.randint(-10, 10) / 10.0\n self.add_sprite(s, \"bonus\", offset)", "def testCoarsenChunks(self):\n chunks = [1,1,2,2,3,3,4,4]\n ds = Dataset(samples=N.arange(len(chunks)).reshape(\n (len(chunks),1)), labels=[1]*8, chunks=chunks)\n coarsenChunks(ds, nchunks=2)\n chunks1 = coarsenChunks(chunks, nchunks=2)\n self.failUnless((chunks1 == ds.chunks).all())\n self.failUnless((chunks1 == N.asarray([0,0,0,0,1,1,1,1])).all())\n\n ds2 = Dataset(samples=N.arange(len(chunks)).reshape(\n (len(chunks),1)), labels=[1]*8)\n coarsenChunks(ds2, nchunks=2)\n self.failUnless((chunks1 == ds.chunks).all())", "def split(self, split_words, min_segments=10):\n valid_exemplars, total_words = self.count_exemplar_words()\n\n # Raise error if we inputs are invalid to avoid infinite loop\n if split_words < 0 or split_words > total_words:\n raise ValueError(\n \"cannot split corpus with {} words into split with {} words\".format(\n total_words, split_words\n )\n )\n\n exemplars_in_split = []\n word_counter, seg_counter = 0, 0\n while word_counter <= split_words or seg_counter <= min_segments:\n exemplars_in_split += [\n valid_exemplars.pop(random.randrange(len(valid_exemplars)))\n ]\n word_counter += exemplars_in_split[-1].n_words\n seg_counter += len(exemplars_in_split[-1].transcript_file.segments)\n\n new_corpus = corpus(\n {\n \"location\": self.location,\n \"exemplars\": exemplars_in_split,\n }\n )\n\n remaining_corpus = self - new_corpus\n remaining_corpus.location = self.location\n\n return remaining_corpus, new_corpus", "def set_calculated_segments(self, total_lights, segments):\n self.set_segments(segments)\n self.set_lights_per_segment(int(total_lights / segments))", "def shuffle_segments(self, segs, unmasked_tokens):\n\n p = np.random.random()\n if p >= 0.8:\n shuf_segs = segs[1:] + unmasked_tokens\n elif p >= 0.6:\n shuf_segs = segs[:-1] + unmasked_tokens\n else:\n shuf_segs = segs + unmasked_tokens\n\n random.shuffle(shuf_segs)\n\n if p >= 0.8:\n shuf_segs = segs[0:1] + shuf_segs\n elif p >= 0.6:\n shuf_segs = shuf_segs + segs[-1:]\n return shuf_segs", "def split(self, train_fraction=0.8, val_fraction=0.2, test_fraction=0, seed=1):\n if self.is_initialized():\n return\n self.ensure_fraction_sum(train_fraction, val_fraction, test_fraction)\n np.random.seed(seed)\n self.samples = sorted(self.samples)\n np.random.shuffle(self.samples)\n train_idx = ceil(train_fraction*(len(self.samples)))\n val_idx = train_idx + ceil(val_fraction*(len(self.samples)))\n test_idx = val_idx + ceil(test_fraction*(len(self.samples)))\n indices = list(range(len(self.samples)))\n self.indices[TRAIN_SUBSET] = indices[:train_idx]\n 
self.indices[VAL_SUBSET] = indices[train_idx:val_idx]\n self.indices[TEST_SUBSET] = indices[val_idx:test_idx]", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "def segment(data):", "def split_segment(self):\n # Selection management\n selected_segment = \\\n self.controller.shared_data.obj_track.selected_segment_idx\n\n if len(selected_segment) > 1:\n messagebox.showerror('Warning',\n 'More than one segment is selected')\n return\n elif len(selected_segment) == 0:\n messagebox.showerror('Warning',\n 'No segment is selected')\n return\n else:\n segment_idx = selected_segment[0]\n df_segment = \\\n self.controller.shared_data.obj_track.get_segment(segment_idx)\n\n # Create interactivity\n del self.split_segment_interaction\n self.split_segment_interaction = SplitSegmentCallback(\n self.controller.shared_data,\n df_segment)\n\n self.split_segment_interaction.connect()", "def tokenize(self):\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n 
entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1", "def test_creating_a_new_segment(self):\n pass", "def _set_branch_nseg(geo, sec_idx, seg_L):\n\n # iterate over trees in section list\n for tree_key, tree in sec_idx.iteritems():\n\n for sec_i, sec in enumerate(tree):\n\n section = geo[tree_key][sec]\n\n # get section length\n sec_L = section.L\n print 'section length', section.L\n\n # determine number of segments\n n_seg = int(np.ceil(sec_L/seg_L))\n\n # # check that number of segments is odd\n if n_seg % 2 != 0:\n n_seg+=1\n\n # # set number of segments\n section.nseg = n_seg\n print 'nseg', section.nseg\n return geo" ]
[ "0.565537", "0.5480135", "0.54776907", "0.54391086", "0.5423783", "0.5409851", "0.539458", "0.5373258", "0.53153485", "0.5310843", "0.52797747", "0.52563226", "0.5236451", "0.5226697", "0.520109", "0.51921284", "0.51742184", "0.5147782", "0.5139677", "0.5136137", "0.5089941", "0.50826854", "0.506336", "0.50602436", "0.50491625", "0.50460255", "0.50314826", "0.50150555", "0.49823332", "0.4976184" ]
0.73318213
0
Generates a new shuffled corpus with the provided (or generated) reference annotation set,
def corpus_shuffle(self, annotators: Union[int, Iterable[str]], shift: bool = False, false_pos: bool = False, false_neg: bool = False, split: bool = False, cat_shuffle: bool = False, include_ref: bool = False ) -> Continuum: continuum = self.corpus_from_reference(annotators) if shift: self.shift_shuffle(continuum) if false_pos: self.false_pos_shuffle(continuum) if false_neg: self.false_neg_shuffle(continuum) if cat_shuffle: self.category_shuffle(continuum) if split: self.splits_shuffle(continuum) if include_ref: assert self._reference_annotator not in continuum.annotators, \ "Reference annotator can't be included as " \ "an annotator with the same name is in the " \ "generated corpus." for unit in self._reference_continuum[next(iter(self._reference_continuum.annotators))]: continuum.add(self._reference_annotator, unit.segment, unit.annotation) return continuum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle_annotations(annotations):\n shuffled_annotations = []\n while len(annotations)>0:\n random_index = random.randrange(len(annotations))\n line = annotations[random_index]\n shuffled_annotations.append(line)\n annotations.remove(line)\n return shuffled_annotations", "def create_trianing_instances(document_files, mentions_files, tokenizer, max_seq_length,\n\t\t\t\t\t\t\t\trng, is_training=True):\n\tdocuments = {}\n\tfor input_file in document_files:\n\t\twith open(documents_file + \"/\"+ input_file, \"r\") as reader:\n\t\t\twhile True:\n\t\t\t\tline = reader.readline().strip()\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tline = json.loads(line)\n\t\t\t\tdocuments[line['document_id']] = line\n\n\tmentions = []\n\tfor input_file in mentions_files:\n\t\twith open(input_file, \"r\") as reader:\n\t\t\twhile True:\n\t\t\t\tline = reader.readline().strip()\n\t\t\t\t\n\t\t\t\tif not line:\n\t\t\t\t\tbreak\n\t\t\t\tline = json.loads(line)\n\t\t\t\tmentions.append(line)\n\n\n\ttfidf_candidates = {}\n\twith open(candidates_file, \"r\") as reader:\n\t\twhile True:\n\t\t\tline = reader.readline().strip()\n\n\t\t\tif not line:\n\t\t\t\tbreak\n\t\t\td = json.loads(line)\n\t\t\ttfidf_candidates[d['mention_id']] = d['tfidf_candidates']\n\n\tif split_by_domain:\n\t\tinstances = {}\n\telse:\n\t\tinstances = [] \n\n\tprint(\"--- Total {} mention links ---\".format(len(mentions))) \n\tfor i, mention in enumerate(mentions):\n\t\t# print(\"mention {} \".format(i))\n\t\tinstance = create_instances_from_mention_link(\n\t\t\t\tmention, documents, tfidf_candidates, tokenizer, max_seq_length,\n\t\t\t\trng, is_training=is_training)\n\n\t\tif instance:\n\t\t\tif split_by_domain:\n\t\t\t\tcorpus = mention['corpus']\n\t\t\t\tif corpus not in instances:\n\t\t\t\t\tinstances[corpus] = []\n\t\t\t\tinstances[corpus].append(instance)\n\t\t\telse:\n\t\t\t\tinstances.append(instance)\n\t\t# else:\n\t\t# \tprint(i)\n\n\t\tif i > 0 and i % 1000 == 0:\n\t\t\tprint(\"Instance: %d\" % i)\n\n\tif is_training:\n\t\tif split_by_domain:\n\t\t\tfor corpus in instances:\n\t\t\t\trng.shuffle(instances[corpus])\n\t\telse:\n\t\t\trng.shuffle(instances)\n\n\treturn instances", "def genTrainingSet(set_of_CSVs, file_to_classify, train_size = 5):\n set_of_csvs_minus_target = copy.copy(set_of_CSVs)\n # remove the file we want to classify\n set_of_csvs_minus_target.remove(file_to_classify)\n\n # extract out the random noise files\n # first, set the seed\n random.seed(time.time())\n # now sample\n return_list = random.sample(set_of_csvs_minus_target, train_size)\n return return_list", "def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n 
print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)", "def makeAMixOf2Annotations(inputAnnotPath1, inputAnnotPath2, outputMixPath):\n # make sure the paths end in a slash\n if inputAnnotPath1[-1] != u'/':\n inputAnnotPath1 = u'{0}/'.format(inputAnnotPath1)\n if inputAnnotPath2[-1] != u'/':\n inputAnnotPath2 = u'{0}/'.format(inputAnnotPath2)\n if outputMixPath[-1] != u'/':\n outputMixPath = u'{0}/'.format(outputMixPath)\n # for each input open\n for inPath in [inputAnnotPath1, inputAnnotPath2]:\n # open the file, read the lines\n with open(u'{0}sample.en'.format(inPath)) as inEnFile:\n enLns = inEnFile.readlines()\n with open(u'{0}sample.fr'.format(inPath)) as inFrFile:\n frLns = inFrFile.readlines()\n with open(u'{0}sampleAnnotation.tsv'.format(inPath)) as inAnnotFile:\n annotLns = inAnnotFile.readlines()\n with open(u'{0}sampleReference.tsv'.format(inPath)) as inRefFile:\n refLns = inRefFile.readlines()\n with open(u'{0}scores.tsv'.format(inPath)) as inScFile:\n scLns = inScFile.readlines()\n with open(u'{0}scoresAndMetaData.tsv'.format(inPath)) as inScMetaFile:\n scMetaLns = inScMetaFile.readlines()\n # choose and index randomly\n dejaVus = set([])\n while len(dejaVus) < int(len(enLns)/2.0):\n randomInd = randint(0, len(enLns)-1)\n while randomInd in dejaVus:\n randomInd = randint(0, len(enLns)-1)\n # add to dejavus\n dejaVus.add(randomInd)\n # dump to output file\n utilsOs.appendLineToFile(enLns[randomInd], u'{0}sample.en'.format(outputMixPath), addNewLine=False)\n utilsOs.appendLineToFile(frLns[randomInd], u'{0}sample.fr'.format(outputMixPath), False)\n utilsOs.appendLineToFile(annotLns[randomInd], u'{0}sampleAnnotation.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(refLns[randomInd], u'{0}sampleReference.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(scLns[randomInd], u'{0}scores.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(scMetaLns[randomInd], u'{0}scoresAndMetaData.tsv'.format(outputMixPath), False)", "def train(self, seq_genera, drop_single_seq_genera=False):\n seq_genera, seq_genera_again = tee(seq_genera) #two iters\n #[#seqs in genus], {genus: idx}, #total_seqs.\n genus_seqs, genus_idxs, total_seqs = self._get_genus_seqs(\n seq_genera, drop_single_seq_genera)\n #a matrix of words ~ genera seqcounts and {word: idx}.\n seq_counts, word_idxs = self._get_seq_counts(\n seq_genera_again, genus_idxs)\n self._word_posteriors = self._get_word_posteriors(\n seq_counts, genus_seqs, total_seqs)\n self._genus_idxs, self._word_idxs = genus_idxs, word_idxs", "def annotateFiles(listOfFilesPath=None, annotatedOutputFolder=u'./002manuallyAnnotated/', dumpSP=True):\n referencePathLine = []\n listOfAnnotations = []\n # get the list containing the file paths\n if listOfFilesPath is None:\n listOfFilesPath = randomlySelectNDocsFromPath(b000path.getBtFolderPath(flagFolder=None), n=100)\n makeLocalFolderPaths(listOfFilesPath)\n elif type(listOfFilesPath) is str:\n if u'.json' in listOfFilesPath:\n listOfFilesPath = utilsOs.openJsonFileAsDict(listOfFilesPath)\n else:\n listOfFilesPath = [listOfFilesPath]\n # get rid of the files we have already annotated\n if utilsOs.theFileExists(u'{0}sampleReference.tsv'.format(annotatedOutputFolder)):\n refLines = utilsOs.readAllLinesFromFile(u'{0}sampleReference.tsv'.format(annotatedOutputFolder),\n noNewLineChar=True)\n annotatedFiles = set([line.split(u'\\t')[0] for line in refLines])\n listOfFilesPath = [file for file in listOfFilesPath 
if file not in annotatedFiles]\n # print the annotator cheat sheet\n printCheatSheet()\n # open each file in EN and FR and show it in the terminal\n for filePath in listOfFilesPath:\n print(u'############# {0} ##############'.format(filePath.replace(u'/data/rali8/Tmp/rali/bt/burtrad/corpus_renamed/', u'')))\n # get the path for the source and target\n fileSourcePath = u'{0}.fr'.format(filePath) if u'fr-en' in filePath else u'{0}.en'.format(filePath)\n fileTargetPath = u'{0}.en'.format(filePath) if u'fr-en' in filePath else u'{0}.fr'.format(filePath)\n with open(fileSourcePath) as fileSource:\n with open(fileTargetPath) as fileTarget:\n # show the context of the annotated sentence\n beforeSentSource = fileSource.readline()\n duringSentSource = fileSource.readline()\n beforeSentTarget = fileTarget.readline()\n duringSentTarget = fileTarget.readline()\n # annotate the first sentence pair\n listOfAnnotations = annotateFirstSP(beforeSentSource, duringSentSource, beforeSentTarget,\n duringSentTarget, listOfAnnotations, lineLength=137)\n # save the reference\n # if the filepath is the reference\n if u'burtrad' in filePath:\n referencePathLine.append(u'{0}\\t{1}'.format(filePath, 0))\n # otherwise we get it from a reference file\n else:\n with open(u'{0}.tsv'.format(filePath)) as refFile:\n refLns = [ln.replace(u'\\n', u'') for ln in refFile.readlines()]\n referencePathLine.append(refLns[0])\n # dump the first SP\n if dumpSP is True:\n enSent = beforeSentSource if u'.en' in fileSourcePath else beforeSentTarget\n frSent = beforeSentTarget if u'.en' in fileSourcePath else beforeSentSource\n utilsOs.appendLineToFile(enSent, u'{0}sample.en'.format(annotatedOutputFolder), addNewLine=False)\n utilsOs.appendLineToFile(frSent, u'{0}sample.fr'.format(annotatedOutputFolder), addNewLine=False)\n duringIndex = 1\n # for each line\n while duringSentSource or duringSentTarget:\n # get the correct terminal line length\n lineLength = 137-len(str(len(listOfAnnotations)+1))\n # get the sentences\n afterSentSource = fileSource.readline()\n afterSentTarget = fileTarget.readline()\n # color in red the during lines\n redDuringSource = u'\\033[1;31m{0}\\033[0m'.format(duringSentSource)\n redDuringTarget = u'\\033[1;31m{0}\\033[0m'.format(duringSentTarget)\n # print the sentences\n print(u'{0} - {1}'.format(len(listOfAnnotations)-1, beforeSentSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations)-1, beforeSentTarget))\n print(u'{0} - {1}'.format(len(listOfAnnotations), redDuringSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations), redDuringTarget))\n print(u'{0} - {1}'.format(len(listOfAnnotations)+1, afterSentSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations)+1, afterSentTarget))\n print()\n # count if the lines that take the space of 2 lines\n longLines = getNbLongLines([beforeSentSource, beforeSentTarget, duringSentSource,\n duringSentTarget, afterSentSource, afterSentTarget], lineLength)\n # get the first part of the annotation (aligned or not)\n annotatorGeneralInput = input(u'Aligned-Misaligned annotation: ')\n # make sure to have the right general annotation\n while True:\n if annotatorGeneralInput in [u'0', u'1', u'0.0', u'0.1', u'0.2',\n u'1.0', u'1.1', u'1.2', u'1.3', u'1.4', u'c', u'correction']:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorGeneralInput = input(u'Repeat annotation: ')\n if annotatorGeneralInput in [u'c', u'correct']:\n annotatorGeneralInput, listOfAnnotations = correctionToAnnotation(listOfAnnotations)\n # if we still need to specify what type 
of alignment or misalignment\n if annotatorGeneralInput in [u'0', u'1']:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n # get the second part of the annotation (aligned or not)\n annotatorSpecificInput = input(u'Specific type annotation: ')\n typeAnswers = [u'0', u'1', u'2'] if annotatorGeneralInput == 0 else [u'0', u'1', u'2', u'3', u'4']\n # make sure to have the right specific annotation\n while True:\n if annotatorSpecificInput in typeAnswers:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorSpecificInput = input(u'Repeat type annotation: ')\n # save to the list of annotations\n listOfAnnotations.append(float(u'{0}.{1}'.format(annotatorGeneralInput, annotatorSpecificInput)))\n # if the right answer was given in the right format right away\n else:\n # save to the list of annotations\n listOfAnnotations.append(float(annotatorGeneralInput))\n # remove the lines from the terminal before getting to the next pair\n utilsOs.moveUpAndLeftNLines(14+longLines, slowly=False)\n # erase all remainder of the previous sentences and go back up again\n for e in range(14+longLines):\n print(u' '*(lineLength+4))\n utilsOs.moveUpAndLeftNLines(14 + longLines, slowly=False)\n # next line source\n beforeSentSource = duringSentSource\n duringSentSource = afterSentSource\n # next line target\n beforeSentTarget = duringSentTarget\n duringSentTarget = afterSentTarget\n # append the reference to the file\n # if the filepath is the reference\n if u'burtrad' in filePath:\n referencePathLine.append(u'{0}\\t{1}'.format(filePath, duringIndex))\n # otherwise we get it from a reference file\n else:\n with open(u'{0}.tsv'.format(filePath)) as refFile:\n refLns = [ln.replace(u'\\n', u'') for ln in refFile.readlines()]\n referencePathLine.append(refLns[duringIndex])\n # add 1 to index\n duringIndex += 1\n # dump the file line by line, to be sure in case of error\n # dump the reference\n utilsOs.dumpRawLines(referencePathLine, u'{0}sampleReference.tsv'.format(annotatedOutputFolder),\n addNewline=True, rewrite=True)\n # dump the annotation\n utilsOs.dumpRawLines(listOfAnnotations, u'{0}sampleAnnotation.tsv'.format(annotatedOutputFolder),\n addNewline=True, rewrite=True)\n # dump the SP\n if dumpSP is True:\n enSent = beforeSentSource if u'.en' in fileSourcePath else beforeSentTarget\n frSent = beforeSentTarget if u'.en' in fileSourcePath else beforeSentSource\n utilsOs.appendLineToFile(enSent, u'{0}sample.en'.format(annotatedOutputFolder), addNewLine=False)\n utilsOs.appendLineToFile(frSent, u'{0}sample.fr'.format(annotatedOutputFolder), addNewLine=False)\n # clear part of terminal\n utilsOs.moveUpAndLeftNLines(2, slowly=False)", "def build_data_vectors(annotations, tweets, Tfidf_vect, adr_lexicon_dict, should_balance_set=True):\n\n def vectorize_word(word):\n \"\"\"gives vectorized value from TfidfVectorizer for the given word\n If the word is not part of vocabulary, 0 will be returned\n\n # Arguments\n word - word to vectorize\n\n # Returns\n vectorized value\n \"\"\"\n if word in Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return Tfidf_vect.idf_[i]\n else:\n return 0\n\n def clean_text(text):\n \"\"\"Cleans the text\n This code snippet is taken from https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5\n Author: Susan Li\n\n # Arguments\n text - text to clean\n\n # Returns\n cleaned text\n \"\"\"\n text = text.lower()\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" 
have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = text.strip(' ')\n return text\n\n X = []\n Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n for i, (k, v) in enumerate(annotations.items()):\n tweet_text = clean_text(tweets[k])\n tokens = word_tokenize(tweet_text)\n\n for annotation_index, annotation in enumerate(v):\n prev_token_adr = False\n\n annotated_text = clean_text(annotation['annotatedText'])\n annotated_text_tokens = word_tokenize(annotated_text)\n\n for index, focus_word in enumerate(tokens):\n focus_vector = []\n\n # for Context feature, get index for 3 surrounding words on each side of focus word\n if program_args.context_feature:\n focus_vector.append(vectorize_word(tokens[index-3]) if (index-3 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-2]) if (index-2 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-1]) if (index-1 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index]))\n focus_vector.append(vectorize_word(tokens[index+1]) if (index+1 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+2]) if (index+2 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+3]) if (index+3 < len(tokens)) else 0)\n\n if program_args.adrlexicon_feature:\n if focus_word in adr_lexicon_dict:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n if program_args.prev_adrlexicon_feature:\n if prev_token_adr:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n # assign class label\n if annotation['semanticType'] == 'ADR' and focus_word in annotated_text_tokens:\n Y.append(ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n adr_labels_size += 1\n prev_token_adr = True\n else:\n Y.append(NON_ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n nonadr_labels_size += 1\n prev_token_adr = False\n\n print(\" Dataset size: {}\".format(len(X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n if should_balance_set:\n X, Y = balance_set(X, Y, adr_labels_size, nonadr_labels_size)\n\n X = scipy.sparse.csr_matrix(X)\n return X, Y", "def test_get_corpus(self):\n references = pre.read_data(self.testfilename)\n corpus = pre.get_corpus(references)\n truth = ['m jones', 'e rundensteiner', 'y huang', 'matthew c jones', \n 'e rundensteiner', 'h kuno', 'p marron', 'v taube', 'y ra', \n 'matthew c jones', 'e rundensteiner', 'y huang', 'mike w miller',\n 'l berg', 'mike w miller', 'c chen', 'd kung', 'j samuel', 'j gao',\n 'p hsia', 'y toyoshima', 'jane j robinson', 'jane j robinson',\n 'a gupta', 'a gonzalez', 'a hamid', 'c overstreet', 'h wahab', 'j wild',\n 'k maly', 's ghanem', 'x zhu', 'mary d brown', 'y patt']\n self.assertEquals(corpus, truth)", "def random_init(self, docs):\n for di in xrange(len(docs)):\n doc = docs[di]\n topics = np.random.randint(self.n_topic, size=len(doc))\n self.topic_assignment.append(topics)\n\n for wi in xrange(len(doc)):\n topic = topics[wi]\n word = doc[wi]\n self.TW[topic, word] += 1\n self.sum_T[topic] += 1\n self.DT[di, topic] += 1", "def generate(self, meaning_representation: dict, reference: 
str):\n\n outputs = []\n candidate_alignments = self.get_alignments(\n meaning_representation, reference\n )\n\n if len(candidate_alignments) == 0:\n return outputs\n\n closest_set = []\n for candidate in candidate_alignments:\n closest = self.closest_words(\n candidate[1].lemma_.lower(), self.n_similar\n )\n\n if closest is not None:\n closest_set.append(closest)\n else:\n closest_set.append([meaning_representation[candidate[0]]])\n\n if len(closest_set) == 0:\n return outputs\n\n products = list(product(*closest_set))\n\n random.seed(self.seed)\n sample_output = sample(products, self.max_outputs)\n\n for output_instance in sample_output:\n mr = meaning_representation.copy()\n ref = reference\n for candidate, replacement in zip(\n candidate_alignments, output_instance\n ):\n mr[candidate[0]] = replacement\n ref = ref.replace(candidate[1].text, replacement)\n outputs.append((mr, ref))\n\n return outputs", "def books_pipeline():\n\n # set a random seed for reproducability\n rng = random.Random(FLAGS.random_seed)\n\n # BooksCorpus is organized into directories of genre and files of books\n # adventure-all.txt seems to contain all the adventure books in 1 file\n # romance-all.txt is the same. None of the other directories have this,\n # so we will skip it to not double count those books\n file_name_set = set()\n input_files_by_genre = collections.defaultdict(list)\n for path, _, fnames in tf.gfile.Walk(FLAGS.input_file):\n genre = path.split(\"/\")[-1]\n for fname in fnames:\n if fname == \"adventure-all.txt\" or fname == \"romance-all.txt\":\n continue\n if fname in file_name_set:\n continue\n file_name_set.add(fname)\n input_files_by_genre[genre].append(path + \"/\" + fname)\n\n # Sort genres and iterate in order for reproducability\n train_files, dev_files, test_files = [], [], []\n for genre, file_list in sorted(input_files_by_genre.items()):\n rng.shuffle(file_list)\n genre_size = len(file_list)\n test_size = int(FLAGS.test_size * genre_size)\n dev_size = int(FLAGS.dev_size * genre_size)\n test_files.extend(file_list[:test_size])\n dev_files.extend(file_list[test_size:test_size + dev_size])\n train_files.extend(file_list[test_size + dev_size:])\n assert len(file_list[:test_size]) + \\\n len(file_list[test_size:test_size+dev_size]) + \\\n len(file_list[test_size+dev_size:]) == len(file_list)\n\n # make sure there is no test train overlap\n for filename in train_files:\n assert filename not in test_files\n assert filename not in dev_files\n for filename in dev_files:\n assert filename not in test_files\n\n rng.shuffle(train_files)\n rng.shuffle(dev_files)\n rng.shuffle(test_files)\n\n def pipeline(root):\n \"\"\"Beam pipeline for converting Books Corpus files to TF Examples.\"\"\"\n _ = (\n root | \"Create test files\" >> beam.Create(test_files)\n | \"Read test files\" >> beam.FlatMap(read_file)\n | \"test Shuffle\" >> beam.Reshuffle()\n | \"Preproc test docs\" >> beam.FlatMap(preproc_doc)\n | \"record test Shuffle\" >> beam.Reshuffle()\n | \"Write to test tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".\" + FLAGS.format + \".test.tfrecord\",\n num_shards=100))\n _ = (\n root | \"Create dev files\" >> beam.Create(dev_files)\n | \"Read dev files\" >> beam.FlatMap(read_file)\n | \"dev Shuffle\" >> beam.Reshuffle()\n | \"Preproc dev docs\" >> beam.FlatMap(preproc_doc)\n | \"record dev Shuffle\" >> beam.Reshuffle()\n | \"Write to dev tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".\" + FLAGS.format + \".dev.tfrecord\",\n num_shards=100))\n _ = (\n root | 
\"Create train files\" >> beam.Create(train_files)\n | \"Read train files\" >> beam.FlatMap(read_file)\n | \"train Shuffle\" >> beam.Reshuffle()\n | \"Preproc train docs\" >> beam.FlatMap(preproc_doc)\n | \"record train Shuffle\" >> beam.Reshuffle()\n | \"Write to train tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".\" + FLAGS.format + \".train.tfrecord\",\n num_shards=500))\n return\n\n return pipeline", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def make_targets(tgt_re, samples, target_suffix=\"\"):\n tgts = list(set(tgt_re.fmt.format(**unit) + target_suffix for unit in samples))\n return tgts", "def sampling(train_set, train_meta, klass, label, n_samples_pos, rate_neg, fold, path_idxs):\n\tprint('-- SAMPLING TRAINNING')\n\tdirectory_idxs = path_idxs+fold+'/'+str(int(klass))+'/'\n\tif(os.path.isdir(directory_idxs)):\n\t\tprint('loading indexes...')\n\t\tidxs_class_pos = np.loadtxt(directory_idxs+'idxs_pos_train.txt', dtype=int)\n\t\tidxs_class_neg = np.loadtxt(directory_idxs+'idxs_neg_train.txt', dtype=int)\n\telse:\n\t\tidxs_class_pos = (train_meta[ : , label] == klass).nonzero()[0]\n\t\tidxs_class_neg = (train_meta[ : , label] != klass).nonzero()[0]\n\t\tif(n_samples_pos < len(idxs_class_pos)):\n\t\t\tidxs_class_pos = np.random.choice(idxs_class_pos, n_samples_pos)\n\t\tidxs_class_neg = np.random.choice(idxs_class_neg, int(n_samples_pos*rate_neg))\n\t\tprint('saving indexes...')\n\t\tos.makedirs(directory_idxs)\n\t\tnp.savetxt(directory_idxs+'idxs_pos_train.txt', idxs_class_pos, fmt='%d')\n\t\tnp.savetxt(directory_idxs+'idxs_neg_train.txt', idxs_class_neg, fmt='%d')\n\n\ttrain_set = np.vstack((train_set[idxs_class_pos], train_set[idxs_class_neg]))\n\ttrain_meta = np.vstack((train_meta[idxs_class_pos], train_meta[idxs_class_neg]))\n\ttrain_meta[:, label] = 1\n\ttrain_meta[len(idxs_class_pos):, label] = 
-1\n\treturn [train_set, train_meta]", "def makeRandomSet(weightedSet): \n\tnewtrainingSet = []\n\t\n\t#Make a list starting with zero and ending with MAX_INT\n\t#Where all of values inbetween are the probs of choosing this element\n\tprobs = [0]\n\tfor ex in weightedSet:\t\t\n\t\tprobs.append(ex.weight + probs[-1])\n\t\n\tprobs.append(_MAX_INT)\n\t\t\n\tfor i in range(len(weightedSet)): #Get N new smaples\n\t\trandNum = _random()\n\t\tfor j in range(len(probs) - 1 ):\t\n\t\t\tif randNum >= probs[j] and probs[j+1] > randNum : \n\t\t\t\tnewtrainingSet.append(boostExample(\n\t\t\t\t\tLabeledExample(weightedSet[j-1],label=weightedSet[j-1].label) ,weightedSet[j-1].weight) )\n\t\t\t\tbreak\n\t\t\n\t\n\treturn newtrainingSet", "def construct_annotated_corpora(extraction_path, id_variant_path, corpus_name, target_dir):\n # Read in main ID-annotated file\n df_annotated = pd.read_table(extraction_path, header=None,\n names=['Sentence', 'Total_surprisal', 'Per_word_surprisal', 'Normalized_surprisal',\n 'Total_UID_divergence', 'Per_word_UID_divergence', 'Normalized_UID_divergence'],\n skip_blank_lines=True)\n if id_variant_path is not None:\n # Extract ID-specific sentences from the reference corpus\n df_variant = pd.read_table(id_variant_path, header=None, names=['Sentence'], skip_blank_lines=True)\n target_list = df_variant.iloc[:, 0].tolist()\n target_list = [sent.strip() for sent in target_list]\n else:\n # No extraction, entire reference corpus is considered for further steps\n target_list = df_annotated.iloc[:, 0].tolist()\n target_list = [sent.strip() for sent in target_list]\n\n # Isolate evaluation-relevant features\n df_features = df_annotated.loc[:, ['Sentence', 'Normalized_surprisal', 'Normalized_UID_divergence']]\n surprisals = list()\n uid_divs = list()\n\n # Write the normalized surprisal and UID divergence distributions to file\n features_log_path = os.path.join(target_dir, '{:s}_ID_features.txt'.format(corpus_name))\n print('Writing to {:s} ...'.format(features_log_path))\n with open(features_log_path, 'w') as id_file:\n for line_id in range(len(df_features)):\n sent = df_features.iloc[line_id][0]\n sent_ns = df_features.iloc[line_id][1]\n sent_nud = df_features.iloc[line_id][2]\n\n if sent in target_list:\n id_file.write('{:f}\\t{:f}\\n'.format(sent_ns, sent_nud))\n surprisals += [float(sent_ns)]\n uid_divs += [float(sent_nud)]\n # Calculate corpus statistics\n id_file.write('=' * 10 + '\\n')\n id_file.write('Surprisal max: {:.4f}\\n'.format(np.max(surprisals)))\n id_file.write('Surprisal min: {:.4f}\\n'.format(np.min(surprisals)))\n id_file.write('Surprisal mean: {:.4f}\\n'.format(np.mean(surprisals)))\n id_file.write('Surprisal standard deviation: {:.4f}\\n'.format(np.std(surprisals)))\n id_file.write('=' * 10 + '\\n')\n id_file.write('UID divergence max: {:.4f}\\n'.format(np.max(uid_divs)))\n id_file.write('UID divergence min: {:.4f}\\n'.format(np.min(uid_divs)))\n id_file.write('UID divergence mean: {:.4f}\\n'.format(np.mean(uid_divs)))\n id_file.write('UID divergence standard deviation: {:.4f}\\n'.format(np.std(uid_divs)))\n print('Done.')", "def test_labeled_corpus_saving(self):\n\n original_corpus = [[\"Yo\", \"soy\", \"una\", \"oración\", \"gramatical\", \",\",\n \"regocíjense\", \"en\", \"mi\", \"glória\", \".\"],\n [\"Yo\", \"ungrammatical\", \"es\", \"oración\", \",\"\n \"tú\", \"presumido\", \"elitista\", \".\"]]\n reader = LinguoDatasetReader()\n\n with tempfile.TemporaryDirectory() as temp_dir:\n # first test the grammatical case\n fileName_asG = temp_dir + 
\"testfile\"\n corpus_tools.save_uniform_labeled_corpus(fileName_asG,\n original_corpus,\n g_label=1)\n loaded_asG = reader.read(fileName_asG)\n self.assertEqual(len(original_corpus), len(loaded_asG))\n for original_sent, loaded_sent in zip(original_corpus, loaded_asG):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"grammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"G\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)\n # Now to test it for ungrammatical (with a valid ug_type)\n fileName_asUG = temp_dir + \"testfileUG\"\n corpus_tools.save_uniform_labeled_corpus(fileName_asUG,\n original_corpus,\n g_label=0, ug_type=\"WS\")\n loaded_asUG = reader.read(fileName_asUG)\n self.assertEqual(len(original_corpus), len(loaded_asUG))\n for original_sent, loaded_sent in zip(original_corpus,\n loaded_asUG):\n self.assertEqual(loaded_sent.fields[\"g_label\"].label,\n \"ungrammatical\")\n self.assertEqual(loaded_sent.fields[\"ug_type\"].label, \"WS\")\n plain_loaded = [str(token) for\n token in loaded_sent.fields[\"sentence\"].tokens]\n self.assertEqual(plain_loaded, original_sent)", "def train(self, train_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # extra data (at random)\n np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n val_set_x = val_set[:,:-2]\n val_set_u = val_set[:,-2]\n val_set_y = val_set[:,-1]\n # val_set_x, val_set_y = shared_dataset(val_set[:,:-1], val_set[:,-1])\n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n \n \n # compile Theano functions to get train/val/test errors\n \n # val_model = theano.function([self.index], self.errors(self.y),\n # givens={\n # self.x: val_set_x[batch_start:batch_end],\n # self.y: val_set_y[batch_start:batch_end]},\n # allow_input_downcast=True)\n\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n 
allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n\n #FIXME: this is a bit weird\n test_y_pred = self.predict(val_set_x)\n make_preds = theano.function([self.x], test_y_pred, allow_input_downcast=True)\n\n test_error = T.mean(T.neq(test_y_pred, self.y))\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n\n # start training over mini-batches\n print 'training...' \n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n # test_loss = test_model(val_set_x, val_set_y)\n # test_perf = 1 - test_loss \n # predz = make_preds(val_set_x)\n # val_perf = FmesSemEval(predz, val_set_y, pos_ind, neg_ind)\n val_perf = 0\n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val avg fmes: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) 
\n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n # test_loss = test_wmodel(val_set_x, val_set_y)\n # test_perf = 1 - test_loss \n # predz = make_preds(val_set_x)\n # fmes = FmesSemEval(predz, val_set_y, pos_ind, neg_ind)\n # print predz\n # print test_set_y\n # print \"Test performance acc: %.3f | polar fmes:%.3f \" % (test_perf,fmes)\n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n return test_perf", "def PrepareSets(args, tokenizer, train_set, dev_set, test_set, first_label=False):\n\n # filter out al instances where the emotion is neutral\n train_set = train_set.filter(lambda example: not 27 in example['labels'])\n dev_set = dev_set.filter(lambda example: not 27 in example['labels'])\n test_set = test_set.filter(lambda example: not 27 in example['labels'])\n\n # remove unnecessary columns\n train_set = train_set.remove_columns(['text', 'id'])\n dev_set = dev_set.remove_columns(['text', 'id'])\n test_set = test_set.remove_columns(['text', 'id'])\n\n # function that creates new instances for all labels\n def handle_multiple_labels(batch):\n new_batch = {'attention_mask': [],\n 'input_ids': [],\n 'labels': [],\n 'token_type_ids': [],\n }\n for instance_idx, instance in enumerate(batch['labels']):\n for label in instance:\n new_batch['attention_mask'].append(batch['attention_mask'][instance_idx])\n new_batch['input_ids'].append(batch['input_ids'][instance_idx])\n new_batch['labels'].append(label)\n new_batch['token_type_ids'].append(batch['token_type_ids'][instance_idx])\n return new_batch\n\n # function that takes the first label\n def handle_first_label(batch):\n batch['labels'] = batch['labels'][0]\n return batch\n\n # check which label function to use\n if first_label:\n label_fn = handle_first_label\n batched = False\n else:\n label_fn = handle_multiple_labels\n batched = True\n\n # filter the labels\n train_set = train_set.map(label_fn, batched=batched)\n dev_set = dev_set.map(label_fn, batched=batched)\n test_set = test_set.map(label_fn, batched=batched)\n\n # return the prepared datasets\n return train_set, dev_set, test_set", "def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text", "def __init__(self, corpus):\n self.train(corpus)", "def guide(self, doc_list=None):\r\n\r\n with pyro.plate(\"topics\", self.K) as k_vec:\r\n\r\n # Lambda => latent variable for the per-topic word q distribution\r\n Lamda = torch.stack([\r\n pyro.param(\r\n f\"lamda_q_{k}\",\r\n (1 + 0.01*(2*torch.rand(self.V)-1)),\r\n constraint=constraints.positive)\r\n for k in k_vec\r\n ])\r\n\r\n # Beta_q => per-topic word q distribtion\r\n Beta_q = pyro.sample(f\"beta\", dist.Dirichlet(Lamda))\r\n\r\n Theta_q = []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # gamma => q for the per-doc topic vector\r\n gamma = pyro.param(f\"gamma_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n\r\n # theta_q => posterior per-doc topic vector\r\n theta_q = 
pyro.sample(f\"theta_{d}\", dist.Dirichlet(gamma))\r\n\r\n phi = pyro.param(\r\n f\"phi_q_{d}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive\r\n )\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]) as w_vec:\r\n\r\n phi = torch.stack([\r\n pyro.param(\r\n f\"phi_q_{d}_{w}\",\r\n (1+0.01*(2*torch.rand(self.K)-1))/self.K,\r\n constraint=constraints.positive)\r\n for w in w_vec\r\n ])\r\n\r\n # assign a topic\r\n pyro.sample(f\"z_assignment_{d}\", dist.Categorical(phi))\r\n\r\n Theta_q.append(theta_q)\r\n\r\n Theta_q = torch.stack(Theta_q)\r\n\r\n return Beta_q, Theta_q", "def generateAssociationRule(freqSet):", "def generate_bar_example(\n num_topics=10, num_documents=500, num_words_per_doc=100, alpha=1, beta=1, seed=None\n):\n\n width = 5\n\n vocab_size = width * width\n rng = random.Random()\n if seed is not None:\n rng.seed(seed)\n\n zeros = [[0 for i in range(width)] for j in range(width)]\n topic_squares = [zeros for i in range(num_topics)]\n for i in range(width):\n for j in range(width):\n topic_squares[i][i][j] = 1.0 / width\n for i in range(width):\n for j in range(width):\n topic_squares[width + i][j][i] = 1.0 / width\n topics = []\n for k in range(num_topics):\n topics.append(list(_itertools.chain(*topic_squares[k])))\n\n def weighted_choice(probs):\n total = sum(probs)\n r = rng.uniform(0, total)\n upto = 0\n for i, w in enumerate(probs):\n if upto + w > r:\n return i\n upto += w\n assert False, \"Shouldn't get here\"\n\n documents = []\n thetas = []\n for d in range(num_documents):\n doc = [0 for i in range(width * width)]\n topic_dist = [rng.gammavariate(1, 1) for k in range(num_topics)]\n topic_dist = [z / sum(topic_dist) for z in topic_dist]\n for i in range(num_words_per_doc):\n k = weighted_choice(topic_dist)\n w = weighted_choice(topics[k])\n doc[w] += 1\n thetas.append(topic_dist)\n documents.append(doc)\n\n sparse_documents = []\n for d in documents:\n sd = {}\n for i in range(width):\n for j in range(width):\n k = str(i) + \",\" + str(j)\n sd[k] = d[i * width + j]\n sparse_documents.append(sd)\n bow_documents = turicreate.SArray(sparse_documents)\n return bow_documents", "def poem_generation(magnet,topic):\r\n e = inflect.engine()\r\n antonyms = []\r\n synonyms = []\r\n poem = \"\"\r\n verb = \"\"\r\n # plural\r\n if (e.singular_noun(topic) is False):\r\n verb = \"is\"\r\n else:\r\n verb = \"are\"\r\n \r\n for syn in wordnet.synsets(topic):\r\n for l in syn.lemmas():\r\n synonyms.append(l.name())\r\n if l.antonyms():\r\n antonyms.append(l.antonyms()[0].name())\r\n if (len(set(antonyms)) < 1):\r\n for syn in wordnet.synsets(RAND_LIST[random.randint(0,len(RAND_LIST)-1)]):\r\n for l in syn.lemmas():\r\n synonyms.append(l.name())\r\n if l.antonyms():\r\n antonyms.append(l.antonyms()[0].name())\r\n topic = topic.capitalize()\r\n for i in range(0,random.randint(5,15)):\r\n verse = random.randint(0,6)\r\n question = random.randint(0,6)\r\n # structure\r\n # antonyms\r\n if (verse < 2) and len(antonyms) > 0:\r\n ant_magnet = metaphor_magnet(antonyms[random.randint(0,len(antonyms)-1)])\r\n choice = random.randint(0,len(ant_magnet)-1)\r\n detail = ant_magnet[choice].split(\":\")\r\n if (question < 2):\r\n index = random.randint(0,len(QUESTION)-1)\r\n if (detail[0][0] in ['a','e','i','o','u']):\r\n poem += QUESTION[index] + \" \" + verb + \" \" + topic + \" not like an \" + detail[0] + \" \" + detail[1] + \"?\\n\" \r\n else:\r\n poem += QUESTION[index] + \" \" + verb + \" \" + topic + \" not like a \" + detail[0] + \" \" + detail[1] + \"?\\n\" 
\r\n else:\r\n if (detail[0][0] in ['a','e','i','o','u']):\r\n poem += topic + \" \" + verb + \" not like an \" + detail[0] + \" \" + detail[1] + \".\\n\" \r\n else:\r\n poem += topic + \" \" + verb + \" not like a \" + detail[0] + \" \" + detail[1] + \".\\n\" \r\n \r\n else:\r\n choice = random.randint(0,len(magnet)-1)\r\n detail = magnet[choice].split(\":\")\r\n if (question < 2):\r\n index = random.randint(0,len(QUESTION)-1)\r\n if (detail[0][0] in ['a','e','i','o','u']):\r\n poem += QUESTION[index] + \" \" + verb + \" \" + topic + \" like an \" + detail[0] + \" \" + detail[1] + \"?\\n\" \r\n else:\r\n poem += QUESTION[index] + \" \" + verb + \" \" + topic + \" like a \" + detail[0] + \" \" + detail[1] + \"?\\n\" \r\n else:\r\n if (detail[0][0] in ['a','e','i','o','u']):\r\n poem += topic + \" \" + verb + \" like an \" + detail[0] + \" \" + detail[1] + \"\\n\" \r\n else:\r\n poem += topic + \" \" + verb + \" like a \" + detail[0] + \" \" + detail[1] + \"\\n\" \r\n \r\n return poem", "def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def __init__(self, root, which_set, vocab, transform=None):\n self.root = root\n self.img_root = os.path.join(root, 'Img')\n self.ann = json.load(open(os.path.join(root, '{}_labels.json'.format(which_set)),'r'))\n\n self.vocab = vocab\n self.transform = transform\n self.img_list = list(self.ann.keys())\n # transfer categories id to labels\n self.cat2label = {}\n for i, k in enumerate(label_corpus):\n self.cat2label[k] = i\n\n self.num_cats = len(self.cat2label) \n\n # vgnome has varied number of annotations [1, 20], average 5.73\n # we still choose five as the parameter. 
It can be adjusted later on\n self.num_ann_onebatch = 5\n self.ids = [a for a in range(len(self.ann))]\n\n print('\\t {} train samples from {} set'.format(len(self.ids), which_set ))\n print('\\t {} of categories'.format(self.num_cats))", "def gen_tt_relation(tt_path, rep_word_set, word_embd_dict, expk, weighted):\n tt_relation = set() # remove duplicates\n\n # Step 1: Find the cosine distance between every pair of word embeddings.\n rep_mat = gen_indexed_matrix(rep_word_set, word_embd_dict)\n exp_mat = gen_indexed_matrix(list(word_embd_dict.keys()), word_embd_dict)\n cos_mat = pairwise_distances(rep_mat.embd_matrix, exp_mat.embd_matrix, \"cosine\")\n\n # Step 2: Find expansion words for every representative word.\n if weighted:\n for rep_idx in tqdm(range(len(rep_mat.items))):\n exp_idx_list = cos_mat[rep_idx].argsort()[1:expk+1] # exclude rep word\n for exp_idx in exp_idx_list:\n # Convert cos distance to similarity then shift and rescale.\n # 1) Shift from [-1,1] to [0,2] to ensure positiveness.\n # 2) Normalize to [0,1] to resemble probability.\n sim = str(1-cos_mat[rep_idx][exp_idx]/2) # cos sim = 1-cos dist\n tt_relation.add(rep_mat.items[rep_idx]+\" \"+exp_mat.items[exp_idx]+\" \"+sim)\n else:\n for rep_idx in tqdm(range(len(rep_mat.items))):\n exp_idx_list = cos_mat[rep_idx].argsort()[1:expk+1] # exclude rep word\n for exp_idx in exp_idx_list:\n tt_relation.add(rep_mat.items[rep_idx]+\" \"+exp_mat.items[exp_idx]+\" 1.0\")\n\n # Step 3: Save TT relation.\n with open(tt_path, \"w\") as f:\n f.write(\"\\n\".join(list(tt_relation)))" ]
[ "0.574516", "0.5633824", "0.53477716", "0.53092796", "0.5275466", "0.5191949", "0.51811427", "0.51632196", "0.51452035", "0.5106954", "0.5081439", "0.50716454", "0.50579906", "0.5056524", "0.50563395", "0.5044877", "0.50380534", "0.5030752", "0.50278974", "0.50206995", "0.50053936", "0.5001676", "0.4990927", "0.49875104", "0.49704924", "0.4951441", "0.49430433", "0.4941873", "0.49402285", "0.4936005" ]
0.60172826
0
Generates a QWERTY Manhattan distance resemblance matrix. Costs for letter pairs are based on the Manhattan distance of the corresponding keys on a standard QWERTY keyboard.
def qwerty_distance(): from collections import defaultdict import math R = defaultdict(dict) R['-']['-'] = 0 zones = ["dfghjk", "ertyuislcvbnm", "qwazxpo"] keyboard = ["qwertyuiop", "asdfghjkl", "zxcvbnm"] for num, content in enumerate(zones): for char in content: R['-'][char] = num + 1 R[char]['-'] = 3 - num for a in ascii_lowercase: rowA = None posA = None for num, content in enumerate(keyboard): if a in content: rowA = num posA = content.index(a) for b in ascii_lowercase: for rowB, contentB in enumerate(keyboard): if b in contentB: R[a][b] = int(math.fabs(rowB - rowA) + math.fabs(posA - contentB.index(b))) return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance_matrix(self, x, y, keyboard_weight=None):\r\n # create distance matrix\r\n size_x = len(x) + 1\r\n size_y = len(y) + 1\r\n dist_matrix = np.zeros((size_x, size_y))\r\n for i in range(size_x):\r\n dist_matrix[i, 0] = i\r\n for j in range(size_y):\r\n dist_matrix[0, j] = j\r\n\r\n ## fill distance matrix\r\n # no keyboard weight\r\n if not keyboard_weight:\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n subs = dist_matrix[i-1, j-1] + 1\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # manhattan keyboard weight\r\n elif keyboard_weight == \"manhattan\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.manhattan_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # euclidean keyboard weight\r\n elif keyboard_weight == \"euclidean\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.euclidean_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n \r\n return dist_matrix", "def get_alphabet_similarity_matrix(self):\n distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))\n numpy.fill_diagonal(distance_matrix, 0)\n for index_one, descriptor_one in enumerate(self.descriptors):\n for index_two, descriptor_two in enumerate(self.descriptors):\n distance = descriptor_one - descriptor_two\n squared_distance = numpy.dot(distance, distance)\n distance_matrix[index_one, index_two] = squared_distance\n distance_matrix /= 2. * (self.sigma_amino_acid ** 2)\n return numpy.exp(-distance_matrix)", "def calculate_manhattan_dist(state):", "def calculate_manhattan_dist(self):\n return self._current_cost + abs(self._current_loc.get_row() - self._goal_loc.get_row()) +\\\n abs(self._current_loc.get_column() - self._goal_loc.get_column())", "def manhattan(self):\n distance = 0\n for i in range(3):\n for j in range(3):\n if self.plateau[i][j] != 0:\n x, y = divmod(self.plateau[i][j]-1, 3)\n distance += abs(x - i) + abs(y - j)\n return distance", "def __init__(self,alphabet=\"amino\",dist_function=\"simple\"):\n\n # initialize internal variables\n self.alphabet = alphabet\n self.dist_function = dist_function\n\n # decide on the alphabet\n if self.alphabet == \"amino\": \n self._alphabet_string = \"*ABCDEFGHIKLMNPQRSTVWXYZ\"\n else:\n raise ValueError(\"alphabet not recongized.\")\n \n if self.dist_function == \"simple\":\n self._dist_function_internal = 0\n elif self.dist_function == \"dl\":\n self._dist_function_internal = 1\n else:\n err = \"dist_function not recognized. 
should be 'simple' or 'dl' (Damerau-Levenshtein)\\n\"\n raise ValueError(err)\n \n self.alphabet_size = len(list(self._alphabet_string))\n\n enum_list = zip(self._alphabet_string,range(len(self._alphabet_string)))\n self._alphabet_dict = dict([(a, i) for a, i in enum_list])\n\n tmp_matrix = np.zeros((self.alphabet_size,self.alphabet_size),dtype=int)\n for k1 in self._alphabet_string:\n i = self._alphabet_dict[k1] \n for k2 in self._alphabet_string:\n j = self._alphabet_dict[k2]\n if k1 == k2:\n tmp_matrix[i,j] = 0\n else:\n tmp_matrix[i,j] = 1\n\n self.dist_matrix = tmp_matrix", "def test_manhattan_distance(self):\n knn = Knn(n_neighbors=3)\n knn.fit(np.array(little_X), little_Y)\n d = knn._manhattan_distance(np.array([5,6]))\n assert (d == [7, 7]).all(), \"Manhattan Distance is not correct\"", "def manhattan_dist(c1, c2):\n return abs(c1[0] - c2[0]) + abs(c1[1] - c2[1]) + abs(c1[2] - c2[2])", "def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)", "def calc_distance(\n target_batch_keys, target_keys_pred, batch_assignments_gt, src_key_num_gt\n):\n batch_keys_gt = torch.bmm(batch_assignments_gt, target_batch_keys[:, :, :2])\n err = distance(target_keys_pred, batch_keys_gt, src_key_num_gt)\n return err", "def calculate_manhattan_dist(idx, value, n):\n pass", "def distance_dmc(distances, Ks, points):\n doors = []\n for d in distances:\n dmc = []\n for k in Ks:\n print \"==========================\", k, \"==========================\"\n clusters = create_clusters(25, k)\n\n kmeans(points, clusters)\n # print \"Finished creating kmeans algorithm\"\n\n create_backbone_network(GRAPH, clusters, d)\n # print \"Finished creating backbone network\"\n\n find_all_shortest_paths(clusters, SP_TABLE, GRAPH)\n # print \"Finished finding all shortest paths\"\n\n for clst in clusters:\n clst.inter_cost = inter_cost(clst)\n clst.intra_cost = intra_cost(points, clst)\n clst.dm_cost = door_matt_cost(clusters, clst, SP_TABLE)\n\n ret = total_cost(clusters)\n dmc.append(ret[2])\n doors.append(sum(dmc))\n draw_door_matts(map(lambda d: float(format(d, \".4g\")), distances), doors)", "def key_distance(self, x, y, type=\"manhattan\"):\r\n if type == \"manhattan\":\r\n return self.manhattan_dist_matrix[self.keys.index(x), self.keys.index(y)]\r\n elif type == \"euclidean\":\r\n return self.euclidean_dist_matrix[self.keys.index(x), self.keys.index(y)]", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat", "def _make_simple_distances():\n distances = {}\n def sym(desired, supported, strength):\n \"Define a symmetric distance between languages.\"\n desired_t = tuple(desired.split('-'))\n supported_t = tuple(supported.split('-'))\n distances[desired_t, supported_t] = strength\n distances[supported_t, desired_t] = strength\n\n def one(desired, supported, strength):\n \"Define a one-way distance between languages.\"\n desired_t = tuple(desired.split('-'))\n supported_t = tuple(supported.split('-'))\n distances[desired_t, supported_t] = strength\n\n def ok(desired, supported):\n \"Define the most common type of link: a one-way distance of 10.\"\n one(desired, supported, 10)\n\n sym('no', 'nb', 1)\n sym('hr', 'bs', 4)\n sym('sh', 'bs', 4)\n sym('sr', 'bs', 4)\n sym('sh', 'hr', 4)\n sym('sr', 'hr', 
4)\n sym('sh', 'sr', 4)\n sym('ssy', 'aa', 4)\n one('gsw', 'de', 4)\n one('lb', 'de', 4)\n sym('da', 'no', 8)\n sym('da', 'nb', 8)\n ok('ab', 'ru')\n ok('ach', 'en')\n ok('af', 'nl')\n ok('ak', 'en')\n ok('ay', 'es')\n ok('az', 'ru')\n ok('az-Latn', 'ru-Cyrl')\n ok('be', 'ru')\n ok('bem', 'en')\n ok('bh', 'hi')\n ok('bn', 'en')\n ok('bn-Beng', 'en-Latn')\n ok('br', 'fr')\n ok('ceb', 'fil')\n ok('chr', 'en')\n ok('ckb', 'ar')\n ok('co', 'fr')\n ok('crs', 'fr')\n ok('cy', 'en')\n ok('ee', 'en')\n ok('eo', 'en')\n ok('et', 'fi')\n ok('eu', 'es')\n ok('fo', 'da')\n ok('fy', 'nl')\n ok('ga', 'en')\n ok('gaa', 'en')\n ok('gd', 'en')\n ok('gl', 'es')\n ok('gn', 'es')\n ok('gu', 'hi')\n ok('ha', 'en')\n ok('haw', 'en')\n ok('ht', 'fr')\n ok('hy', 'ru')\n ok('hy-Armn', 'ru-Cyrl')\n ok('ia', 'en')\n ok('ig', 'en')\n ok('is', 'en')\n ok('jv', 'id')\n ok('ka-Geor', 'en-Latn')\n ok('ka', 'en')\n ok('kg', 'fr')\n ok('kk', 'ru')\n ok('km', 'en')\n ok('km-Khmr', 'en-Latn')\n ok('kn', 'en')\n ok('kn-Knda', 'en-Latn')\n ok('kri', 'en')\n ok('ku', 'tr')\n ok('ky', 'ru')\n ok('la', 'it')\n ok('lg', 'en')\n ok('ln', 'fr')\n ok('lo', 'en')\n ok('lo-Laoo', 'en-Latn')\n ok('loz', 'en')\n ok('lua', 'fr')\n ok('mfe', 'en')\n ok('mg', 'fr')\n ok('mi', 'en')\n ok('mk', 'bg')\n ok('ml', 'en')\n ok('ml-Mlym', 'en-Latn')\n ok('mn', 'ru')\n ok('mr', 'hi')\n ok('ms', 'id')\n ok('mt', 'en')\n ok('my', 'en')\n ok('my-Mymr', 'en-Latn')\n ok('ne', 'en')\n ok('ne-Deva', 'en-Latn')\n sym('nn', 'nb', 10)\n ok('nn', 'no')\n ok('nso', 'en')\n ok('ny', 'en')\n ok('nyn', 'en')\n ok('oc', 'fr')\n ok('om', 'en')\n ok('or', 'en')\n ok('or-Orya', 'en-Latn')\n ok('pa', 'en')\n ok('pa-Guru', 'en-Latn')\n ok('pcm', 'en')\n ok('ps', 'en')\n ok('ps-Arab', 'en-Latn')\n ok('qu', 'es')\n ok('rm', 'de')\n ok('rn', 'en')\n ok('rw', 'fr')\n ok('sa', 'hi')\n ok('sd', 'en')\n ok('sd-Arab', 'en-Latn')\n ok('si', 'en')\n ok('si-Sinh', 'en-Latn')\n ok('sn', 'en')\n ok('so', 'en')\n ok('sq', 'en')\n ok('st', 'en')\n ok('su', 'id')\n ok('sw', 'en')\n ok('ta', 'en')\n ok('ta-Taml', 'en-Latn')\n ok('te', 'en')\n ok('te-Telu', 'en-Latn')\n ok('tg', 'ru')\n ok('ti', 'en')\n ok('ti-Ethi', 'en-Latn')\n ok('tk', 'ru')\n ok('tk-Latn', 'ru-Cyrl')\n ok('tlh', 'en')\n ok('tn', 'en')\n ok('to', 'en')\n ok('tt', 'ru')\n ok('tum', 'en')\n ok('ug', 'zh')\n ok('ur', 'en')\n ok('ur-Arab', 'en-Latn')\n ok('uz', 'ru')\n ok('uz-Latn', 'ru-Cyrl')\n ok('wo', 'fr')\n ok('xh', 'en')\n ok('yi', 'en')\n ok('yi-Hebr', 'en-Latn')\n ok('yo', 'en')\n ok('zu', 'en')\n sym('sr-Latn', 'sr-Cyrl', 5)\n one('zh-Hans', 'zh-Hant', 15)\n one('zh-Hant', 'zh-Hans', 19)\n sym('zh-Hant-HK', 'zh-Hant-MO', 3)\n\n return distances", "def setUp(self):\r\n # Create the mapping file/distance matrix combo from the overview\r\n # tutorial.\r\n self.dist_matrix_string = [\"\\tPC.354\\tPC.355\\tPC.356\\tPC.481\\tPC.593\\\r\n \\tPC.607\\tPC.634\\tPC.635\\tPC.636\",\r\n \"PC.354\\t0.0\\t0.625\\t0.623\\t0.61\\t0.577\\\r\n \\t0.729\\t0.8\\t0.721\\t0.765\",\r\n \"PC.355\\t0.625\\t0.0\\t0.615\\t0.642\\t0.673\\\r\n \\t0.776\\t0.744\\t0.749\\t0.677\",\r\n \"PC.356\\t0.623\\t0.615\\t0.0\\t0.682\\t0.737\\\r\n \\t0.734\\t0.777\\t0.733\\t0.724\",\r\n \"PC.481\\t0.61\\t0.642\\t0.682\\t0.0\\t0.704\\\r\n \\t0.696\\t0.675\\t0.654\\t0.696\",\r\n \"PC.593\\t0.577\\t0.673\\t0.737\\t0.704\\t0.0\\\r\n \\t0.731\\t0.758\\t0.738\\t0.737\",\r\n \"PC.607\\t0.729\\t0.776\\t0.734\\t0.696\\t0.731\\\r\n \\t0.0\\t0.718\\t0.666\\t0.727\",\r\n \"PC.634\\t0.8\\t0.744\\t0.777\\t0.675\\t0.758\\\r\n \\t0.718\\t0.0\\t0.6\\t0.578\",\r\n 
\"PC.635\\t0.721\\t0.749\\t0.733\\t0.654\\t0.738\\\r\n \\t0.666\\t0.6\\t0.0\\t0.623\",\r\n \"PC.636\\t0.765\\t0.677\\t0.724\\t0.696\\t0.737\\\r\n \\t0.727\\t0.578\\t0.623\\t0.0\"]\r\n\r\n self.mapping_string = [\"#SampleID\\tBarcodeSequence\\tTreatment\\tDOB\",\r\n \"PC.354\\tAGCACGAGCCTA\\tControl\\t20061218\",\r\n \"PC.355\\tAACTCGTCGATG\\tControl\\t20061218\",\r\n \"PC.356\\tACAGACCACTCA\\tControl\\t20061126\",\r\n \"PC.481\\tACCAGCGACTAG\\tControl\\t20070314\",\r\n \"PC.593\\tAGCAGCACTTGT\\tControl\\t20071210\",\r\n \"PC.607\\tAACTGTGCGTAC\\tFast\\t20071112\",\r\n \"PC.634\\tACAGAGTCGGCT\\tFast\\t20080116\",\r\n \"PC.635\\tACCGCAGAGTCA\\tFast\\t20080116\",\r\n \"PC.636\\tACGGTGAGTGTC\\tFast\\t20080116\"]\r\n\r\n # Field to test on. Field values are either \"Control\" or \"Fast\".\r\n self.field = 'Treatment'\r\n\r\n # Create a tiny distancy matrix/mapping file with a single sample for\r\n # additional testing.\r\n self.tiny_dist_matrix_string = [\"\\tSamp.1\", \"Samp.1\\t0\"]\r\n self.tiny_mapping_string = [\"#SampleID\\tBarcodeSequence\\tSampleField\",\r\n \"Samp.1\\tAGCACGAGCCTA\\tSampleFieldState1\"]\r\n self.tiny_field = 'SampleField'\r\n\r\n self.small_dist_matrix_string = [\"\\tSamp.1\\tSamp.2\", \"Samp.1\\t0\\t0.5\",\r\n \"Samp.2\\t0.5\\t0\"]\r\n self.small_mapping_string = [\"#SampleID\\tBarcodeSequence\\tSampleField\",\r\n \"Samp.1\\tAGCACGAGCCTA\\tSampleFieldState1\",\r\n \"Samp.2\\tAGCACGAGCCTG\\tSampleFieldState2\"]\r\n self.small_field = 'SampleField'\r\n\r\n # Parse mapping \"files\" (faked here).\r\n self.mapping, self.mapping_header, self.comments = parse_mapping_file(\r\n self.mapping_string)\r\n mapping_data = [self.mapping_header]\r\n mapping_data.extend(self.mapping)\r\n self.groups = group_by_field(mapping_data, self.field)\r\n\r\n self.tiny_mapping, self.tiny_mapping_header, self.tiny_comments = \\\r\n parse_mapping_file(self.tiny_mapping_string)\r\n tiny_mapping_data = [self.tiny_mapping_header]\r\n tiny_mapping_data.extend(self.tiny_mapping)\r\n self.tiny_groups = group_by_field(tiny_mapping_data, self.tiny_field)\r\n\r\n self.small_mapping, self.small_mapping_header, self.small_comments = \\\r\n parse_mapping_file(self.small_mapping_string)\r\n small_mapping_data = [self.small_mapping_header]\r\n small_mapping_data.extend(self.small_mapping)\r\n self.small_groups = group_by_field(small_mapping_data,\r\n self.small_field)\r\n\r\n # Parse distance matrix \"files\" (faked here).\r\n self.dist_matrix_header, self.dist_matrix = parse_distmat(\r\n self.dist_matrix_string)\r\n\r\n self.tiny_dist_matrix_header, self.tiny_dist_matrix = parse_distmat(\r\n self.tiny_dist_matrix_string)\r\n\r\n self.small_dist_matrix_header, self.small_dist_matrix = parse_distmat(\r\n self.small_dist_matrix_string)\r\n\r\n # extract_per_individual* input data\r\n self.individual_states_and_responses_map_f1 = \\\r\n parse_mapping_file_to_dict(\r\n individual_states_and_responses_map_f1.split('\\n'))[0]\r\n self.individual_states_and_responses_map_f2 = \\\r\n parse_mapping_file_to_dict(\r\n individual_states_and_responses_map_f2.split('\\n'))[0]\r\n self.paired_difference_biom1 = \\\r\n parse_biom_table(paired_difference_biom_f1.split('\\n'))", "def heuristic_manhattan_distance(self):\n distance = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n i1, j1 = self._get_coordinates(self.position[i][j], self.PUZZLE_END_POSITION)\n distance += abs(i - i1) + abs(j - j1)\n\n return distance", "def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x 
- coord_b.x) + abs(coord_a.y - coord_b.y)", "def manhattan_distance(self):\n dist = 0\n for target, tile in zip(self.winCdt[:-1], self.tiles[:-1]):\n dist += abs(target[0] - tile[0]) + abs(target[1] - tile[1])\n return dist", "def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))", "def test_align_sanity(self):\n # QWERTY resemblance matrix:\n R = qwerty_distance()\n diff, u, r = min_difference_align(\"polynomial\", \"exponential\", R)\n # Warning: we may (read: 'will') use another matrix!\n self.assertEqual(diff, 15)\n # Warning: there may be other optimal matchings!\n self.assertEqual(u, '--polyn-om-ial')\n self.assertEqual(r, 'exp-o-ne-ntial')", "def calc_dist_matrix(self):\n\n self.dist_matrix = spatial.distance.squareform(spatial.distance.pdist(self.data_vector,metric=\"hamming\"))\n\n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def get_manhattan_dist(row1, col1, row2, col2):\n distHoriz = abs(row1 - row2)\n distVert = abs(col1 - col2)\n dist = distHoriz + distVert\n return dist", "def _get_kriging_matrix(self, n, exact_values):\n\n xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis],\n self.Y_ADJUSTED[:, np.newaxis],\n self.Z_ADJUSTED[:, np.newaxis]), axis=1)\n d = cdist(xyz, xyz, 'euclidean')\n a = np.zeros((n+1, n+1))\n a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d)\n if not exact_values:\n if self.variogram_model == 'linear':\n np.fill_diagonal(a, self.variogram_model_parameters[1])\n elif self.variogram_model != 'custom':\n np.fill_diagonal(a, self.variogram_model_parameters[2])\n else :\n np.fill_diagonal(a, 0.)\n a[n, :-1] = 1.0\n a[:-1, n] = 1.0\n\n return a", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def setUp(self):\r\n # The unweighted unifrac distance matrix from the overview tutorial.\r\n self.overview_dm_str = [\"\\tPC.354\\tPC.355\\tPC.356\\tPC.481\\tPC.593\\\r\n \\tPC.607\\tPC.634\\tPC.635\\tPC.636\",\r\n \"PC.354\\t0.0\\t0.595483768391\\t0.618074717633\\\r\n \\t0.582763100909\\t0.566949022108\\\r\n \\t0.714717232268\\t0.772001731764\\\r\n \\t0.690237118413\\t0.740681707488\",\r\n \"PC.355\\t0.595483768391\\t0.0\\t0.581427669668\\\r\n 
\\t0.613726772383\\t0.65945132763\\\r\n \\t0.745176523638\\t0.733836123821\\\r\n \\t0.720305073505\\t0.680785600439\",\r\n \"PC.356\\t0.618074717633\\t0.581427669668\\t0.0\\\r\n \\t0.672149021573\\t0.699416863323\\\r\n \\t0.71405573754\\t0.759178215168\\\r\n \\t0.689701276341\\t0.725100672826\",\r\n \"PC.481\\t0.582763100909\\t0.613726772383\\\r\n \\t0.672149021573\\t0.0\\t0.64756120797\\\r\n \\t0.666018240373\\t0.66532968784\\\r\n \\t0.650464714994\\t0.632524644216\",\r\n \"PC.593\\t0.566949022108\\t0.65945132763\\\r\n \\t0.699416863323\\t0.64756120797\\t0.0\\\r\n \\t0.703720200713\\t0.748240937349\\\r\n \\t0.73416971958\\t0.727154987937\",\r\n \"PC.607\\t0.714717232268\\t0.745176523638\\\r\n \\t0.71405573754\\t0.666018240373\\\r\n \\t0.703720200713\\t0.0\\t0.707316869557\\\r\n \\t0.636288883818\\t0.699880573956\",\r\n \"PC.634\\t0.772001731764\\t0.733836123821\\\r\n \\t0.759178215168\\t0.66532968784\\\r\n \\t0.748240937349\\t0.707316869557\\t0.0\\\r\n \\t0.565875193399\\t0.560605525642\",\r\n \"PC.635\\t0.690237118413\\t0.720305073505\\\r\n \\t0.689701276341\\t0.650464714994\\\r\n \\t0.73416971958\\t0.636288883818\\\r\n \\t0.565875193399\\t0.0\\t0.575788039321\",\r\n \"PC.636\\t0.740681707488\\t0.680785600439\\\r\n \\t0.725100672826\\t0.632524644216\\\r\n \\t0.727154987937\\t0.699880573956\\\r\n \\t0.560605525642\\t0.575788039321\\t0.0\"]\r\n self.overview_dm = DistanceMatrix.from_file(self.overview_dm_str)\r\n\r\n # The overview tutorial's metadata mapping file.\r\n self.overview_map_str = [\"#SampleID\\tBarcodeSequence\\tTreatment\\tDOB\",\r\n \"PC.354\\tAGCACGAGCCTA\\tControl\\t20061218\",\r\n \"PC.355\\tAACTCGTCGATG\\tControl\\t20061218\",\r\n \"PC.356\\tACAGACCACTCA\\tControl\\t20061126\",\r\n \"PC.481\\tACCAGCGACTAG\\tControl\\t20070314\",\r\n \"PC.593\\tAGCAGCACTTGT\\tControl\\t20071210\",\r\n \"PC.607\\tAACTGTGCGTAC\\tFast\\t20071112\",\r\n \"PC.634\\tACAGAGTCGGCT\\tFast\\t20080116\",\r\n \"PC.635\\tACCGCAGAGTCA\\tFast\\t20080116\",\r\n \"PC.636\\tACGGTGAGTGTC\\tFast\\t20080116\"]\r\n self.overview_map = MetadataMap.parseMetadataMap(self.overview_map_str)\r\n\r\n self.test_map_str = [\r\n \"#SampleID\\tBarcodeSequence\\tFoo\\tBar\\tDescription\",\r\n \"PC.354\\tAGCACGAGCCTA\\tfoo\\ta\\t354\",\r\n \"PC.355\\tAACTCGTCGATG\\tfoo\\ta\\t355\",\r\n \"PC.356\\tACAGACCACTCA\\tbar\\ta\\t356\",\r\n \"PC.481\\tACCAGCGACTAG\\tfoo\\ta\\t481\",\r\n \"PC.593\\tAGCAGCACTTGT\\tbar\\ta\\t593\",\r\n \"PC.607\\tAACTGTGCGTAC\\tbar\\ta\\t607\",\r\n \"PC.634\\tACAGAGTCGGCT\\tbar\\ta\\t634\",\r\n \"PC.635\\tACCGCAGAGTCA\\tfoo\\ta\\t635\",\r\n \"PC.636\\tACGGTGAGTGTC\\tbar\\ta\\t636\"]\r\n self.test_map = MetadataMap.parseMetadataMap(self.test_map_str)\r\n\r\n # A 1x1 dm.\r\n self.single_ele_dm = DistanceMatrix([[0]], ['s1'])\r\n\r\n # How many times to test a p-value.\r\n self.p_val_tests = 10", "def setUp(self):\r\n # The unweighted unifrac distance matrix from the overview tutorial.\r\n self.overview_dm_str = [\"\\tPC.354\\tPC.355\\tPC.356\\tPC.481\\tPC.593\\\r\n \\tPC.607\\tPC.634\\tPC.635\\tPC.636\",\r\n \"PC.354\\t0.0\\t0.595483768391\\t0.618074717633\\\r\n \\t0.582763100909\\t0.566949022108\\\r\n \\t0.714717232268\\t0.772001731764\\\r\n \\t0.690237118413\\t0.740681707488\",\r\n \"PC.355\\t0.595483768391\\t0.0\\t0.581427669668\\\r\n \\t0.613726772383\\t0.65945132763\\\r\n \\t0.745176523638\\t0.733836123821\\\r\n \\t0.720305073505\\t0.680785600439\",\r\n \"PC.356\\t0.618074717633\\t0.581427669668\\t0.0\\\r\n \\t0.672149021573\\t0.699416863323\\\r\n \\t0.71405573754\\t0.759178215168\\\r\n 
\\t0.689701276341\\t0.725100672826\",\r\n \"PC.481\\t0.582763100909\\t0.613726772383\\\r\n \\t0.672149021573\\t0.0\\t0.64756120797\\\r\n \\t0.666018240373\\t0.66532968784\\\r\n \\t0.650464714994\\t0.632524644216\",\r\n \"PC.593\\t0.566949022108\\t0.65945132763\\\r\n \\t0.699416863323\\t0.64756120797\\t0.0\\\r\n \\t0.703720200713\\t0.748240937349\\\r\n \\t0.73416971958\\t0.727154987937\",\r\n \"PC.607\\t0.714717232268\\t0.745176523638\\\r\n \\t0.71405573754\\t0.666018240373\\\r\n \\t0.703720200713\\t0.0\\t0.707316869557\\\r\n \\t0.636288883818\\t0.699880573956\",\r\n \"PC.634\\t0.772001731764\\t0.733836123821\\\r\n \\t0.759178215168\\t0.66532968784\\\r\n \\t0.748240937349\\t0.707316869557\\t0.0\\\r\n \\t0.565875193399\\t0.560605525642\",\r\n \"PC.635\\t0.690237118413\\t0.720305073505\\\r\n \\t0.689701276341\\t0.650464714994\\\r\n \\t0.73416971958\\t0.636288883818\\\r\n \\t0.565875193399\\t0.0\\t0.575788039321\",\r\n \"PC.636\\t0.740681707488\\t0.680785600439\\\r\n \\t0.725100672826\\t0.632524644216\\\r\n \\t0.727154987937\\t0.699880573956\\\r\n \\t0.560605525642\\t0.575788039321\\t0.0\"]\r\n\r\n # The overview tutorial's metadata mapping file.\r\n self.overview_map_str = [\"#SampleID\\tBarcodeSequence\\tTreatment\\tDOB\",\r\n \"PC.354\\tAGCACGAGCCTA\\tControl\\t20061218\",\r\n \"PC.355\\tAACTCGTCGATG\\tControl\\t20061218\",\r\n \"PC.356\\tACAGACCACTCA\\tControl\\t20061126\",\r\n \"PC.481\\tACCAGCGACTAG\\tControl\\t20070314\",\r\n \"PC.593\\tAGCAGCACTTGT\\tControl\\t20071210\",\r\n \"PC.607\\tAACTGTGCGTAC\\tFast\\t20071112\",\r\n \"PC.634\\tACAGAGTCGGCT\\tFast\\t20080116\",\r\n \"PC.635\\tACCGCAGAGTCA\\tFast\\t20080116\",\r\n \"PC.636\\tACGGTGAGTGTC\\tFast\\t20080116\"]\r\n\r\n # The prefix to use for temporary files/dirs. This prefix may be added\r\n # to, but all temp dirs and files created by the tests will have this\r\n # prefix at a minimum.\r\n self.prefix = 'qiime_RExecutor_tests'\r\n\r\n self.start_dir = getcwd()\r\n self.dirs_to_remove = []\r\n self.files_to_remove = []\r\n\r\n self.tmp_dir = get_qiime_temp_dir()\r\n\r\n if not exists(self.tmp_dir):\r\n makedirs(self.tmp_dir)\r\n\r\n # If test creates the temp dir, also remove it.\r\n self.dirs_to_remove.append(self.tmp_dir)\r\n\r\n # Create temporary input dir/files.\r\n self.input_dir = mkdtemp(dir=self.tmp_dir,\r\n prefix='%s_input_dir_' % self.prefix)\r\n self.dirs_to_remove.append(self.input_dir)\r\n\r\n self.dm_fp = join(self.input_dir, 'dm.txt')\r\n dm_f = open(self.dm_fp, 'w')\r\n for line in self.overview_dm_str:\r\n dm_f.write(line + \"\\n\")\r\n dm_f.close()\r\n self.files_to_remove.append(self.dm_fp)\r\n\r\n self.map_fp = join(self.input_dir, 'map.txt')\r\n map_f = open(self.map_fp, 'w')\r\n for line in self.overview_map_str:\r\n map_f.write(line + \"\\n\")\r\n map_f.close()\r\n self.files_to_remove.append(self.map_fp)\r\n\r\n # Create temporary output directory.\r\n self.output_dir = mkdtemp(dir=self.tmp_dir,\r\n prefix='%s_output_dir_' % self.prefix)\r\n self.dirs_to_remove.append(self.output_dir)", "def manhattan_distance(self):\n return calculate_manhattan_distance(self.location, self.target_location)", "def question7(seq_x, seq_y):\n \n diag_score = 2\n off_diag_score = 1\n dash_score = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n score_matrix = student.build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score)\n \n align_matrix = student.compute_alignment_matrix(seq_x, seq_y, score_matrix, True)\n score, align_x, align_y = student.compute_global_alignment(seq_x, seq_y, score_matrix, align_matrix)\n 
\n edit_distance = len(seq_x) + len(seq_y) - score\n \n print \"Edit distance: \" + str(edit_distance)\n print align_x\n print align_y", "def calculate_distance_matrix(played_decks: Union[List[FuzzyDeck], List[Deck]], measure: str):\n deck_data = np.array(played_decks).reshape(len(played_decks), 1)\n if measure == \"jaccard\":\n dist = pdist(deck_data, lambda u, v: u[0].jaccard_distance(v[0]))\n elif measure == \"euclidean\":\n dist = pdist(deck_data, lambda u, v: u[0].euclidean_distance(v[0]))\n else:\n raise ValueError(\"Unknown distance measure {}. \".format(measure) +\n \"Please choose one of the following distance measures ['euclidean','jaccard']\")\n\n return dist" ]
[ "0.63121146", "0.60487026", "0.58539367", "0.57887495", "0.5680378", "0.5640274", "0.5589275", "0.5494875", "0.5494359", "0.5468434", "0.5434783", "0.5410972", "0.54078287", "0.5402172", "0.53882927", "0.5361496", "0.5337732", "0.53260463", "0.53234446", "0.53033936", "0.52948624", "0.5258136", "0.52415526", "0.5234575", "0.52345073", "0.5215295", "0.5215295", "0.52061194", "0.5200631", "0.5189014" ]
0.60570216
1
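A minimal standalone sketch of the idea in the record above: it recomputes the Manhattan key distance from the same three QWERTY rows used by qwerty_distance(), with assumed sample letters; the helper names key_position and manhattan_key_distance are illustrative and not part of the dataset.

KEYBOARD = ["qwertyuiop", "asdfghjkl", "zxcvbnm"]

def key_position(ch):
    # Row and column index of a letter key on the QWERTY layout.
    for row, content in enumerate(KEYBOARD):
        if ch in content:
            return row, content.index(ch)
    raise ValueError("not a letter key: %r" % ch)

def manhattan_key_distance(a, b):
    ra, ca = key_position(a)
    rb, cb = key_position(b)
    return abs(ra - rb) + abs(ca - cb)

assert manhattan_key_distance("q", "w") == 1  # neighbours on the top row
assert manhattan_key_distance("q", "p") == 9  # opposite ends of the top row
assert manhattan_key_distance("a", "z") == 1  # one row down, same column index

The qwerty_distance() function above additionally stores gap costs for '-' via the zones lists; this sketch only covers letter-to-letter pairs.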
Difference sanity test. Given a simple resemblance matrix, test that the reported difference is the expected minimum. Do NOT assume we will always use this resemblance matrix when testing!
def test_diff_sanity(self): alphabet = ascii_lowercase + '-' # The simplest (reasonable) resemblance matrix: R = dict( [ ( a, dict( [ ( b, (0 if a==b else 1) ) for b in alphabet ] ) ) for a in alphabet ] ) # Warning: we may (read: 'will') use another matrix! self.assertEqual(min_difference("dinamck","dynamic",R),3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testMatrix(m):\n print \"Testing the spread matrix:\"\n for i in m.matrix:\n if float('%.3g' % sum(i)) != 1.000 and sum(i) != 0:\n print \"The spread is not as expected\", sum(i)\n return\n print \"Matrix is acceptable\"", "def test_compare(self): \n d1 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.1]])\n )\n d2 = heat(\n np.array([[0.5, 1]]),\n np.array([[0.5, 1.5]])\n )\n\n # These are very loose bounds\n assert d1 < d2", "def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)", "def test_min_matrix_shape(self):\n\n\t\tdetails = self.watcher.describe(min_evals=30)\n\t\tprint(details)\n\n\t\tfor nev in details.num_evals:\n\t\t\tself.assertGreaterEqual(nev, 30)", "def test_to_from_matrix(self):\n # The equality is only guaranteed up to a sign\n converted = rowan.from_matrix(rowan.to_matrix(input1))\n self.assertTrue(\n np.all(\n np.logical_or(\n np.isclose(input1 - converted, 0),\n np.isclose(input1 + converted, 0),\n )\n )\n )", "def test_get_molecule_least_similar_to(self):\n csv_fpath = self.smiles_seq_to_xl_or_csv(ftype=\"csv\")\n for descriptor in SUPPORTED_FPRINTS:\n for similarity_measure in SUPPORTED_SIMILARITIES:\n molecule_set = MoleculeSet(\n molecule_database_src=csv_fpath,\n molecule_database_src_type=\"csv\",\n fingerprint_type=descriptor,\n similarity_measure=similarity_measure,\n is_verbose=False,\n )\n for mol_smile, mol in zip(TEST_SMILES,\n molecule_set.molecule_database):\n compare_task = CompareTargetMolecule(\n target_molecule_smiles=mol_smile)\n [furthest_mol], [similarity] = compare_task.\\\n get_hits_dissimilar_to(molecule_set)\n mol_similarities = molecule_set.compare_against_molecule(\n mol)\n self.assertEqual(\n np.min(mol_similarities),\n mol.get_similarity_to(\n molecule_set.molecule_database[furthest_mol],\n molecule_set.similarity_measure\n ),\n f\"Expected furthest mol to have minimum \"\n f\"similarity to target molecule \"\n f\"using similarity measure: {similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\",\n )\n self.assertGreaterEqual(similarity, 0.,\n \"Expected similarity value to \"\n \"be >= 0.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\")\n self.assertLessEqual(similarity, 1.,\n \"Expected similarity value to \"\n \"be <= 1.\"\n f\"using similarity measure: \"\n f\"{similarity_measure}, \"\n f\"descriptor: {descriptor}, \"\n f\"for molecule {mol.mol_text}\"\n )", "def test_normalize_matrix(self):\n input_matrix = [\n [0, 1.0],\n [1.0, 1.0]\n ]\n\n expected = [\n [0, 1],\n [0.5, 0.5]\n ]\n\n result = self.summarizer.normalize_matrix(input_matrix)\n\n self.assertEqual(expected, result)", "def test_call_small(self):\r\n # The expected output was verified with vegan's mantel correlogram\r\n # function.\r\n obs = self.small_mc()\r\n\r\n exp_method_name = 'Mantel Correlogram'\r\n self.assertEqual(obs['method_name'], exp_method_name)\r\n\r\n exp_class_index = [3.0, 5.0, 7.0]\r\n assert_almost_equal(obs['class_index'], exp_class_index)\r\n\r\n exp_num_dist = [2, 2, 2]\r\n self.assertEqual(obs['num_dist'], exp_num_dist)\r\n\r\n exp_mantel_r = [0.86602540378443871, None, None]\r\n self.compare_multiple_level_array(obs['mantel_r'], exp_mantel_r)\r\n\r\n # Test matplotlib Figure for a sane state.\r\n obs_fig = 
obs['correlogram_plot']\r\n obs_ax = obs_fig.get_axes()[0]\r\n self.assertEqual(obs_ax.get_title(), \"Mantel Correlogram\")\r\n self.assertEqual(obs_ax.get_xlabel(), \"Distance class index\")\r\n self.assertEqual(obs_ax.get_ylabel(), \"Mantel correlation statistic\")\r\n assert_almost_equal(obs_ax.get_xticks(), [2.85, 2.9, 2.95, 3., 3.05,\r\n 3.1, 3.15, 3.2])\r\n assert_almost_equal(obs_ax.get_yticks(), [0.82, 0.83, 0.84, 0.85,\r\n 0.86, 0.87, 0.88, 0.89, 0.9, 0.91])\r\n\r\n # Test p-values and corrected p-values.\r\n found_match = False\r\n for i in range(self.p_val_tests):\r\n obs = self.small_mc()\r\n p_vals = obs['mantel_p']\r\n corr_p_vals = obs['mantel_p_corr']\r\n self.assertEqual(len(p_vals), 3)\r\n self.assertEqual(p_vals[1:], [None, None])\r\n self.assertTrue(0.0 <= p_vals[0] <= 1.0)\r\n self.compare_multiple_level_array(corr_p_vals, p_vals)\r\n\r\n if p_vals[0] >= 0 and p_vals[0] <= 0.5:\r\n found_match = True\r\n break\r\n self.assertTrue(found_match)", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def test_from_matrix(self):\n self.assertTrue(np.all(rowan.from_matrix(np.eye(3)) == one))\n\n with self.assertRaises(ValueError):\n self.assertTrue(np.allclose(rowan.from_matrix(2 * np.eye(3))))\n\n mat = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n\n self.assertTrue(\n np.logical_or(\n np.allclose(rowan.from_matrix(mat), half),\n np.allclose(rowan.from_matrix(mat), -half),\n )\n )\n\n mat = np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n v = np.copy(half)\n v[3] *= -1\n self.assertTrue(np.allclose(rowan.from_matrix(mat), v))", "def test_normalize_matrix_with_only_zeros(self):\n input_matrix = [\n [0, 0],\n [0, 0]\n ]\n\n expected = [\n [0, 0],\n [0, 0]\n ]\n\n result = self.summarizer.normalize_matrix(input_matrix)\n\n self.assertEqual(expected, result)", "def matrixCompare(A, B):\r\n [m, r] = MatrixMath.matrixSize(A)\r\n [m_c, r_c] = MatrixMath.matrixSize(B)\r\n\r\n expTot = (m_c * r_c)\r\n resTot = 0\r\n\r\n if (m == m_c) and (r == r_c):\r\n for row in range(m):\r\n for col in range(r):\r\n if math.isclose(A[row][col], B[row][col]) is not True:\r\n print(\"Element [{0},{1}] is incorrect\".format(row, col))\r\n return [False, expTot, resTot]\r\n else:\r\n resTot += 1\r\n if expTot != resTot:\r\n print(\"\\r\\nResulting matrix dimensions match the expected matrix dimensions\")\r\n return [True, expTot, resTot]\r\n else:\r\n print(\"Error: Resulting matrix dimensions do not match the expected matrix dimensions\")\r\n return [False, expTot, resTot]\r\n\r\n return [True, expTot, resTot]", "def test_constructed_is_small(self):\n self.assertTrue(all(elt<10 for elt in goodwinsheaf.checkradii()))#check all 
entries have small radii", "def test_cmatrix_simple(self):\n\n test_dtraj = np.array([0, 1, 1, 0, 0, 0, 1, 1, 1, 1])\n cmatrix_compare = np.array([[2., 2.], [1., 4.]])\n cmatrix_computed = cmatrix(test_dtraj)\n self.assertTrue(np.allclose(cmatrix_compare, cmatrix_computed))", "def test_build_stump(self):\n D = np.mat(np.ones((5, 1)) / 5)\n best, min_err, best_estimate =\\\n ada_boost.build_stump(self.larger_matrix,\n self.larger_class_labels,\n D)\n expected = {'threshold': 1.3, 'dim': 0, 'inequal': 'lt'}\n self.assertEqual(best, expected)", "def test_exception():\n mat2D = MatrixDouble([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])\n mat3D = MatrixDouble([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]])\n\n with pytest.raises(ValueError):\n simplify_line_2d(mat3D, 0.1, True)\n\n with pytest.raises(ValueError):\n simplify_line_3d(mat2D, 0.1, True)", "def assert_equal_matrices(array, matrix1, matrix2, periodic):\n nonlocal CUTOFF\n indices = np.where(matrix1 != matrix2)\n for index in range(len(indices[0])):\n if len(indices) == 2:\n # multi_model = False -> AtomArray\n m = None\n i = indices[0][index]\n j = indices[1][index]\n box = array.box if periodic else None\n distance = struc.distance(array[i], array[j], box=box)\n if len(indices) == 3:\n # multi_model = True -> AtomArrayStack\n m = indices[0][index]\n i = indices[1][index]\n j = indices[2][index]\n box = array.box[m] if periodic else None\n distance = struc.distance(array[m,i], array[m,j], box=box)\n try:\n assert distance == pytest.approx(CUTOFF, abs=1e-4)\n except AssertionError:\n print(f\"Model {m}, Atoms {i} and {j}\")\n raise", "def test_align_sanity(self):\n # QWERTY resemblance matrix:\n R = qwerty_distance()\n diff, u, r = min_difference_align(\"polynomial\", \"exponential\", R)\n # Warning: we may (read: 'will') use another matrix!\n self.assertEqual(diff, 15)\n # Warning: there may be other optimal matchings!\n self.assertEqual(u, '--polyn-om-ial')\n self.assertEqual(r, 'exp-o-ne-ntial')", "def check_matrix(self, matrix):\n for i in range(self.size):\n if (matrix[0][i] + matrix[-1][i] == i % 2 or matrix[0][i] + matrix[-1][i] == (i + 1) % 2) and (\n matrix[i][0] + matrix[i][-1] == i % 2 or matrix[i][0] + matrix[i][-1] == (i + 1) % 2):\n pass\n else:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n for i in range(self.size):\n for j in range(self.size):\n if matrix[i][j] > 1:\n logging.debug(\"Matrix detection failed. Matrix passed to function \" + str(matrix))\n return False\n logging.info(\"Matrix detected : \" + str(matrix))\n return True", "def test_result(self, expected_matrix):\n return \"Result is correct\" if self.matrix == expected_matrix else \"Result is not correct\"", "def test_get_field_state_comparisons_invalid_distance_matrix(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n ['Samp.1', 'Samp.2'],\r\n array([[10.0, 0.0003], [0.0003, 0.0]]),\r\n self.small_mapping_header, self.small_mapping,\r\n self.small_field, ['SampleFieldState1'])", "def test_closest_multiple_mats(self):\n m = mats.Materials(\"mats_test.json\", NoneVisited())\n self.assertEqual( '164 G. 
Canis Majoris', m.closest([0, 0, 0], ['Tungsten', 'Germanium'])[1]['system'])\n self.assertEqual( '2MASS J10433563-5945136', m.closest([8000, 0, 3000], ['Tungsten', 'Germanium'])[1]['system'])", "def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)", "def test_closest(self):\n m = mats.Materials(\"mats_test.json\", NoneVisited())\n self.assertEqual( '164 G. Canis Majoris', m.closest([0, 0, 0], ['Tungsten'])[1]['system'])\n self.assertEqual( '2MASS J10433563-5945136', m.closest([0, 0, 0], ['Germanium'])[1]['system'])", "def verify_sub_matrixes(self, matrix=None):\n local_matrix = matrix if matrix else self.matrix\n\n for i in range(len(local_matrix.matrix)):\n temp_matrix = [[]]\n for j in range(i + 1):\n for k in range(i + 1):\n temp_matrix[j].append(local_matrix.matrix[j][k])\n temp_matrix.append([])\n \n temp_matrix.remove([])\n submatrix = Matrix(temp_matrix)\n print(f\"Submatriz de {i + 1}x{i + 1}\")\n det = submatrix.get_determinant()\n print(f\"Determinante = {det}\")\n submatrix.print_matrix()\n if det == 0:\n return False\n \n return True", "def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")", "def test_minimum_all_different(self):\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker()\n result = tt.minimum_from(temp_data)\n self.assertEqual(result[0], 1.0)\n self.assertEqual(temp_data[0][1], result[1])", "def test_closest_common_mats(self):\n m = mats.Materials(\"mats_test.json\", NoneVisited())\n self.assertEqual( '164 G. Canis Majoris', m.closest([0, 0, 0], ['Iron'])[1]['system'])\n self.assertEqual( '2MASS J10433563-5945136', m.closest([8000, 0, 3000], ['Iron'])[1]['system'])", "def test_minimum_all_same(self):\n\n temp_data = [(3.00, time.localtime()), (3.00, time.localtime()),\n (3.00, time.localtime()), (3.00, time.localtime())]\n\n tt = TemperatureTracker()\n result = tt.minimum_from(temp_data)\n self.assertEqual(result[0], 3.0)\n self.assertEqual(temp_data[3][1], result[1])", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)" ]
[ "0.67408186", "0.6515446", "0.64991003", "0.6317342", "0.6263982", "0.62471825", "0.6171229", "0.60156643", "0.60129744", "0.59566", "0.5921032", "0.5915475", "0.58995837", "0.58891666", "0.58565575", "0.5831528", "0.5819044", "0.58110535", "0.5809865", "0.5804188", "0.5800794", "0.5798708", "0.57866555", "0.57408583", "0.57218194", "0.57184446", "0.57095623", "0.57066", "0.56668586", "0.56565803" ]
0.7200956
0
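A minimal sketch of the dynamic program that a min_difference() of this kind could use, assuming the same unit-cost matrix built in the test above; the name min_difference_sketch is illustrative, not the function under test.

from string import ascii_lowercase

def min_difference_sketch(u, r, R):
    # Edit distance with per-pair costs: R[a][b] aligns a with b, R[a]['-'] / R['-'][b] are gap costs.
    n, m = len(u), len(r)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = dp[i - 1][0] + R[u[i - 1]]['-']
    for j in range(1, m + 1):
        dp[0][j] = dp[0][j - 1] + R['-'][r[j - 1]]
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            dp[i][j] = min(dp[i - 1][j - 1] + R[u[i - 1]][r[j - 1]],  # substitute / match
                           dp[i - 1][j] + R[u[i - 1]]['-'],           # gap in r
                           dp[i][j - 1] + R['-'][r[j - 1]])           # gap in u
    return dp[n][m]

alphabet = ascii_lowercase + '-'
R = {a: {b: 0 if a == b else 1 for b in alphabet} for a in alphabet}
assert min_difference_sketch("dinamck", "dynamic", R) == 3  # the expected minimum from the test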
Given 8 values corresponding to corners of a cube and a threshold value, this function returns the coordinates of a polygon to be plotted that divides the cube along edges where the threshold is crossed.
def marchingCubesPolygons(values, threshold): # define vertices of cube in (x,y,z) coordinates VERTICES = [ (0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)] # define edges of cube as combination of two vertices EDGES = [ (0, 1), (1, 2), (2, 3), (0, 3), (0, 4), (1, 5), (2, 6), (3, 7), (4, 5), (5, 6), (6, 7), (4, 7)] activeEdges = [] # list of active edges polygonVertices = [] # list of vertices to drwa # determine which edges are active for edge in EDGES: # edge is active if it straddles a threshold crossing if ((values[edge[0]] > threshold) != (values[edge[1]] > threshold)): activeEdges.append(edge) # create array of vertices for polygon as midpoints of edges for edge in activeEdges: midpoint = tuple((a + b) / 2 for a, b in zip(VERTICES[edge[0]], VERTICES[edge[1]])) polygonVertices.append(midpoint) # sort array of polygon vertices by distance to one another for index in range(len(polygonVertices)): a = polygonVertices[index] polygonVertices[index + 1:] = sorted(polygonVertices[index + 1:], key=lambda item: ((item[0] - a[0]) ** 2 + (item[1] - a[1]) ** 2 + ( item[2] - a[2]) ** 2) ** (1 / 2) ) return polygonVertices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;", "def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz", "def find_boundbox(self, pointcloud):\n\t\tpointcloud=numpy.array(pointcloud) \n\t\tlowerleftcorner=numpy.min(pointcloud,0)\n\t\tupperrightcorner=numpy.max(pointcloud,0)\n\t\treturn lowerleftcorner,upperrightcorner", "def footprint_corner_indices():", "def voxel_to_corner(corner_vox, resolution, center):#TODO\n corners = center + corner_vox\n return corners", "def get_enclosing_box(corners):\n x_ = corners[:, [0, 2, 4, 6]]\n y_ = corners[:, [1, 3, 5, 7]]\n\n xmin = np.min(x_, 1).reshape(-1, 1)\n ymin = np.min(y_, 1).reshape(-1, 1)\n xmax = np.max(x_, 1).reshape(-1, 1)\n ymax = np.max(y_, 1).reshape(-1, 1)\n\n final = np.hstack((xmin, ymin, xmax, ymax, corners[:, 8:]))\n\n return final", "def corners((u,v)):\r\n return ((u+1,v+1), (u+1,v), (u,v), (u,v+1))", "def get_boundary_corners_2D(points):\r\n\tpadding=0.05\r\n\tif points.shape[0] == 3:\r\n\t\tassert (len(points.shape)==2)\r\n\t\tminPt_3d_x = np.amin(points[0,:])\r\n\t\tmaxPt_3d_x = np.amax(points[0,:])\r\n\t\tminPt_3d_y = np.amin(points[1,:])\r\n\t\tmaxPt_3d_y = np.amax(points[1,:])\r\n\r\n\t\tboudary = [minPt_3d_x-padding, maxPt_3d_x+padding, minPt_3d_y-padding, 
maxPt_3d_y+padding]\r\n\r\n\telse:\r\n\t\traise Exception(\"wrong dimension of points!\")\r\n\r\n\treturn boudary", "def _find_corners(self) -> list:\n width, height = self.width, self.height\n return [(0, 0), (width, 0), (0, height), (width, height)]", "def get_vertices_mask(poly, mask):\n h = mask.shape[0]\n w = mask.shape[1]\n gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)\n gt_poly[:,0] = np.floor(poly[:,0]*w)\n gt_poly[:,1] = np.floor(poly[:,1]*h)\n\n mask[gt_poly[:, 1], gt_poly[:, 0]] = 1.0\n\n return mask", "def calcFaceAreas(x,y,z):\n (nLonP1, nLatP1) = x.shape\n (nLon, nLat) = (nLonP1-1, nLatP1-1)\n\n area = numpy.zeros((nLon, nLat))\n\n for i in range(nLon):\n for j in range(nLat):\n left = distance( (x[i,j], y[i,j], z[i,j]), (x[i,j+1], y[i,j+1], z[i,j+1]) )\n right = distance( (x[i+1,j], y[i+1,j], z[i+1,j]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n top = distance( (x[i,j+1], y[i,j+1], z[i,j+1]), (x[i+1,j+1], y[i+1,j+1], z[i+1,j+1]) )\n bot = distance( (x[i,j], y[i,j], z[i,j]), (x[i+1,j], y[i+1,j], z[i+1,j]) )\n \n area[i,j] = 0.5*(left+right) * 0.5*(top+bot)\n\n return area", "def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube", "def find_within_range(self, center, size, shape):\n\n if shape == \"cube\":\n \n payloads = []\n templist = [self.root]\n list_list = []\n list_list.append([self.root])\n for level in range(self.maxiter):\n list_list.append([])\n\n #print list_list\n for level in range(self.maxiter):\n for node in list_list[level]:\n Xedge_max = center[0] + size\n Xedge_min = center[0] - size\n Yedge_max = center[1] + size\n Yedge_min = center[1] - size\n Zedge_max = center[2] + size\n Zedge_min = center[2] - size\n\n corner0 = (Xedge_max, Yedge_max, Zedge_max)\n corner1 = (Xedge_max, Yedge_max, Zedge_min)\n corner2 = (Xedge_max, Yedge_min, Zedge_max)\n corner3 = (Xedge_max, Yedge_min, Zedge_min)\n corner4 = (Xedge_min, Yedge_max, Zedge_max)\n corner5 = (Xedge_min, Yedge_max, Zedge_min)\n corner6 = (Xedge_min, Yedge_min, Zedge_max)\n corner7 = (Xedge_min, Yedge_min, Zedge_min)\n corners = [corner0, corner1, corner2, corner3, corner4, corner5, corner6, corner7]\n table = ((corner0[0] > node.Xcenter),(corner0[1] > node.Ycenter) ,(corner0[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXposYposZ)\n table = ((corner1[0] > node.Xcenter),(corner1[1] > node.Ycenter) ,(corner1[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXposYnegZ)\n table = ((corner2[0] > node.Xcenter),(corner2[1] < node.Ycenter) ,(corner2[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXnegYposZ)\n table = ((corner3[0] > node.Xcenter),(corner3[1] < node.Ycenter) ,(corner3[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.posXnegYnegZ)\n table = ((corner4[0] < node.Xcenter),(corner4[1] > node.Ycenter) ,(corner4[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXposYposZ)\n table = ((corner5[0] < node.Xcenter),(corner5[1] > node.Ycenter) ,(corner5[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXposYnegZ)\n table = ((corner6[0] < 
node.Xcenter),(corner6[1] < node.Ycenter) ,(corner6[2] > node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXnegYposZ)\n table = ((corner7[0] < node.Xcenter),(corner7[1] < node.Ycenter) ,(corner7[2] < node.Zcenter))\n if not False in table:\n list_list[level+1].append(node.negXnegYnegZ)\n\n\n #must remove children that aren't real yet\n temp_templist = []\n for node in list_list[level+1]:\n try:\n node.Xcenter \n temp_templist.append(node)\n except AttributeError:\n pass\n list_list[level+1] = temp_templist\n \n\n payloads = [i.value for i in list_list[-1]]\n return payloads", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 10)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def get_corners(self):\n (x_coord, y_coord) = (self.x_coord[0], self.y_coord[0])\n corner0 = (x_coord, y_coord)\n corner1 = (x_coord + self.size, y_coord)\n corner2 = (x_coord + self.size, y_coord + self.size)\n corner3 = (x_coord, y_coord + self.size)\n return (corner0, corner1, corner2, corner3)", "def binary_mask_to_polygon(binary_mask, tolerance=0):\r\n\r\n polygons = []\r\n if isinstance(binary_mask, torch.Tensor):\r\n binary_mask = binary_mask.cpu().numpy()\r\n # pad mask to close contours of shapes which start and end at an edge\r\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\r\n contours = measure.find_contours(padded_binary_mask, 0.5)\r\n contours = np.subtract(contours, 1)\r\n for contour in contours:\r\n contour = close_contour(contour)\r\n contour = measure.approximate_polygon(contour, tolerance)\r\n if len(contour) < 3:\r\n continue\r\n contour = np.flip(contour, axis=1) # x, y\r\n polygon = np.maximum(contour, 0)\r\n #segmentation = contour.ravel().tolist()\r\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\r\n #segmentation = [0 if i < 0 else i for i in segmentation]\r\n polygons.append(polygon)\r\n\r\n return polygons", "def g_corners(self):\n return (point for point in self.p)", "def get_corner_points(grid):\n\n grid_min = np.array(object=[min(v) for _, v in grid.items()])\n grid_max = np.array(object=[max(v) for _, v in grid.items()])\n\n grids_bounds = []\n for idx in range(len(grid_min)):\n tmp = [grid_min[idx], grid_max[idx]]\n grids_bounds.append(tmp)\n\n corner_points = pd.DataFrame(\n index=pd.MultiIndex.from_product(grids_bounds, names=range(grid_min.size),)\n ).reset_index()\n\n corner_points = np.array(object=corner_points)\n\n return corner_points", "def _cal_meaningful_corners(self):\n corners = np.where(self._free_of_clash)\n corners = np.array(corners, dtype=int)\n corners = corners.transpose()\n return corners", "def boundaries_and_initialize():\n greenLower = (29, 86, 6) # define the lower and upper boundaries of the \"green\"\n greenUpper = (64, 255, 255)\n pts = [((200,300),(255,255,255), 0)]\n blanks = []\n linecolor = (0,0,0)\n counter = 1\n radius = 11\n return greenLower, greenUpper, pts, linecolor, counter, blanks, radius", "def _rectangle_corners(rectangle):\n corner_points = []\n for i1 in (.5, -.5):\n for i2 in (i1, -1 * i1):\n corner_points.append((rectangle['rectangle_center'][0] + i1 * rectangle['length_parallel'],\n rectangle['rectangle_center'][1] + i2 * rectangle['length_orthogonal']))\n\n return _rotate_points(rectangle['rectangle_center'], rectangle['unit_vector_angle'], corner_points)", "def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n 
pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc", "def get_referenced_floor_area() -> np.ndarray:\n\n return envelope.get_referenced_floor_area()", "def calculate_box(vertices: [[float]]) -> [float]:\n x_coords = [x[0] for x in vertices]\n y_coords = [x[1] for x in vertices]\n z_coords = [x[2] for x in vertices]\n\n return [min(x_coords), min(y_coords), min(z_coords), max(x_coords), max(y_coords), max(z_coords)]", "def findPolygons(self):\n # perform marching cubes algorithm\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n # format values for entry\n values = [self.world[x][y][z], self.world[x + 1][y][z], self.world[x + 1][y + 1][z],\n self.world[x][y + 1][z],\n self.world[x][y][z + 1], self.world[x + 1][y][z + 1], self.world[x + 1][y + 1][z + 1],\n self.world[x][y + 1][z + 1]]\n # perform marchine cubes\n self.polygons[x][y][z] = marchingCubesPolygons(values, self.worldThreshold)", "def covering_box(boxes):\n x_min = np.amin([b.x for b in boxes])\n x_max = np.amax([b.x + b.width for b in boxes])\n y_min = np.amin([b.y for b in boxes])\n y_max = np.amax([b.y + b.height for b in boxes])\n cover = Box(x_min, y_min, x_max - x_min, y_max - y_min)\n return cover", "def _bounding_box_to_polytope(lower, upper):\n intervals = [(a[0], b[0]) for a, b in zip(lower, upper)]\n return box2poly(intervals)", "def _ComputeCorners(self):\n if self._length > 0:\n perp_unit = np.array([-self._angle_v[1], self._angle_v[0]])\n else:\n perp_unit = np.array([0., 0.])\n w2 = perp_unit * (self._width / 2.)\n\n corner_1 = np.array([self._start[0] + w2[0], self._start[1] + w2[1]])\n corner_2 = np.array([self._end[0] + w2[0], self._end[1] + w2[1]])\n corner_3 = np.array([self._end[0] - w2[0], self._end[1] - w2[1]])\n corner_4 = np.array([self._start[0] - w2[0], self._start[1] - w2[1]])\n return np.array([corner_1, corner_2, corner_3, corner_4])", "def get_boxcorners_front(places, rotates, size):\n corners = []\n for place, rotate, sz in zip(places, rotates, size):\n x, y, z = place\n h, w, l = sz\n if l > 10:\n continue\n\n # corner = np.array([\n # [x - l / 2., y - w / 2., z],\n # [x + l / 2., y - w / 2., z],\n # [x - l / 2., y + w / 2., z],\n # [x - l / 2., y - w / 2., z + h],\n # [x - l / 2., y + w / 2., z + h],\n # [x + l / 2., y + w / 2., z],\n # [x + l / 2., y - w / 2., z + h],\n # [x + l / 2., y + w / 2., z + h],\n # ])\n corner = np.array([\n [x - l / 2., y - w / 2., z], #bottom surface top right (0)\n # [x + l / 2., y - w / 2., z], #bottom surface bottom right (1)\n [x - l / 2., y + w / 2., z], #bottom surface top left (2)\n [x - l / 2., y - w / 2., z + h],#top surface top right (3)\n [x - l / 2., y + w / 2., z + h],#top surface top left (4)\n # [x + l / 2., y + w / 2., z],#bottom surface bottom left (5)\n # [x + l / 2., y - w / 2., z + h],# top surface bottom right (6)\n # [x + l / 2., y + w / 2., z + h],# top surface bottom left (7)\n ])\n\n # corner = np.array([\n # [x, y, z], #bottom surface bottom left (0) \n # [x, yy, z], #bottom surface right (1)\n # [xx, y, z], #bottom surface top left (2)(2)\n # [xx, yy, z],#bottom surface top right (3)(0)\n # [x, y, zz], #top surface left (4)\n # [x, yy, zz], #top surface right (5)\n # [xx, y, zz], #top surface top left (6)(4)\n # [xx, yy, zz],#top surface top right (7)(3)\n # ])\n\n corner -= np.array([x, y, z])\n\n rotate_matrix = np.array([\n [np.cos(rotate), -np.sin(rotate), 0],\n [np.sin(rotate), np.cos(rotate), 0],\n [0, 0, 1]\n 
])\n\n a = np.dot(corner, rotate_matrix.transpose())\n a += np.array([x, y, z])\n corners.append(a)\n return np.array(corners).astype(np.float32)", "def detector_outline( bottom_vec3d_list, top_vec3d_list ):\n # hardcoded angular offset for hexagon\n phi0 = -20.0 * I3Units.degree \n\n # hardcoded threshold for an edge\n cos_angle_threshold = math.cos( 7.0 * I3Units.degree ) \n\n bottom = Vec3dList()\n top = Vec3dList()\n\n string_coords = []\n for b, t in zip( bottom_vec3d_list, top_vec3d_list ):\n if t[2] < 450.0 * I3Units.meter: # ignore deep-core\n continue\n string_coords.append(( math.atan2(t[1], t[0]),\n t[0], t[1], b[2], t[2] ))\n\n # border detection:\n # check if there is a point in each angular segment of hexagon\n border = []\n for i, cur in enumerate( string_coords ):\n counts = [False, False, False, False, False , False]\n for j, other in enumerate( string_coords ):\n if i == j: continue\n dx = cur[1] - other[1]\n dy = cur[2] - other[2]\n phi = int((math.atan2( dy, dx ) - phi0) / I3Units.degree)\n if phi < 0:\n phi += 360\n counts[phi // 60] = True\n neighbor_count = sum( counts )\n # border points don't have a full hexagon of neighbors\n if neighbor_count < 6:\n border.append( cur )\n\n border.sort() # put in circular order\n\n # edge detection:\n # check if differential vectors of three consecutive points have an angle\n for i in xrange( len(border) ):\n ax = border[i - 1][1] - border[i - 2][1]\n ay = border[i - 1][2] - border[i - 2][2]\n bx = border[i][1] - border[i - 1][1]\n by = border[i][2] - border[i - 1][2]\n anorm = (ax ** 2 + ay ** 2) ** 0.5\n bnorm = (bx ** 2 + by ** 2) ** 0.5\n cos_angle = (bx * ax + by * ay) / (anorm * bnorm)\n if cos_angle < cos_angle_threshold:\n cur = border[i - 1]\n bottom.append( vec3d(cur[1], cur[2], cur[3]) )\n top.append( vec3d(cur[1], cur[2], cur[4]) )\n\n return bottom, top" ]
[ "0.62861145", "0.62825006", "0.592349", "0.5918668", "0.5912606", "0.5804889", "0.58029455", "0.57807434", "0.5765746", "0.57598764", "0.57540196", "0.5722212", "0.57113737", "0.56979305", "0.56829304", "0.5670733", "0.5652377", "0.5641534", "0.56261015", "0.5614263", "0.56113654", "0.5584499", "0.5546608", "0.5545537", "0.5531884", "0.5517364", "0.54994345", "0.5494763", "0.5493848", "0.5482562" ]
0.714773
0
Converts the camera's polar position to cartesian and store the result in self.cameraPosition
def polarCameraToCartesian(self): x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180) y = self.cameraPolar[0] * np.cos(self.cameraPolar[2] * np.pi / 180) z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180) self.cameraPosition = [x, y, z]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _polar_to_cartesian(self, radius: float, radians: float) -> None:\n self.x = round(radius * math.cos(radians), EPSILON_EXP_MINUS_1)\n self.y = round(radius * math.sin(radians), EPSILON_EXP_MINUS_1)", "def polar(self):\n return PolarCoord((self._polar[0], self._polar[1]), self._polar[2])", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def PolarToCartesian(Polar):\n\t \n # R,phi,z -> x,y,z\n cp = np.cos(Polar[:,1])\n sp = np.sin(Polar[:,1])\n x = Polar[:,0] * cp\n y = Polar[:,0] * sp\n z = Polar[:,2]\n\n if (len(Polar[0,:])==3):\n Cartesian = np.column_stack((x,y,z))\n else:\n # vR,vphi,vz -> vx,vy,vz\n vx = Polar[:,3]*cp-Polar[:,4]*sp\n vy = Polar[:,4]*cp+Polar[:,3]*sp\n vz = Polar[:,5]\n Cartesian = np.column_stack((x,y,z,vx,vy,vz))\n \n return Cartesian", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def CartesianToPolar(Cartesian):\n \n # x,y,z -> R,phi,z\n R = np.sqrt(Cartesian[:,0]*Cartesian[:,0]+Cartesian[:,1]*Cartesian[:,1])\n phi = np.arctan2(Cartesian[:,1],Cartesian[:,0])\n z = Cartesian[:,2]\n phi[phi<0.] 
+= 2.*np.pi\n if (len(Cartesian[0,:])==3):\n Polar = np.column_stack((R,phi,z))\n else:\n # vx,vy,vz -> vR,vphi,vz\n cp = np.cos(phi)\n sp = np.sin(phi)\n vR = Cartesian[:,3]*cp+Cartesian[:,4]*sp\n vphi = Cartesian[:,4]*cp-Cartesian[:,3]*sp\n vz = Cartesian[:,5]\n Polar = np.column_stack((R,phi,z,vR,vphi,vz))\n\t\t\n return Polar", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def polar(position):\n return list(polar(complex(position[0], position[1])))", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def cartesian_to_polar(self, x, y):\n # r = (x^2+y^2)^2, theta = tan^-1(y/x)\n # pole is the reference point of the coordinate system\n x, y = self.get_rel_to_pole(x, y)\n r = math.sqrt(pow(x, 2)+pow(y, 2))\n # set specific code for edge cases\n if x == 0 and y != 0:\n sign = lambda x: (1, -1)[x < 0]\n return r, sign(y)*math.pi/2\n if x == 0 and y == 0:\n return 0, 0\n else:\n theta = math.atan(y/x)\n return r, theta", "def cartesian_to_polar(cart: np.ndarray, radial_step: float, azimuth_step : float, radial_bins: int,\n azimuth_bins: int, cart_resolution: float) -> np.ndarray:\n max_range = radial_step * radial_bins\n angles = np.linspace(0, 2 * np.pi, azimuth_bins, dtype=np.float32).reshape(azimuth_bins, 1)\n ranges = np.linspace(0, max_range, radial_bins, dtype=np.float32).reshape(1, radial_bins)\n angles = np.tile(angles, (1, radial_bins))\n ranges = np.tile(ranges, (azimuth_bins, 1))\n x = ranges * np.cos(angles)\n y = ranges * np.sin(angles)\n cart_pixel_width = cart.shape[0]\n if (cart_pixel_width % 2) == 0:\n cart_min_range = (cart_pixel_width / 2 - 0.5) * cart_resolution\n else:\n cart_min_range = cart_pixel_width // 2 * cart_resolution\n u = (cart_min_range + y) / cart_resolution\n v = (cart_min_range - x) / cart_resolution\n cart_to_polar_warp = np.stack((u, v), -1)\n polar = np.expand_dims(cv2.remap(cart, cart_to_polar_warp, None, cv2.INTER_LINEAR), -1)\n return np.squeeze(polar)", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def spherical_to_cartesian(r, lat, lon):\n import math\n\n if np.isscalar(r) and np.isscalar(lat) and np.isscalar(lon):\n x = r * math.cos(lat) * math.cos(lon)\n y = r * math.cos(lat) * math.sin(lon)\n z = r * math.sin(lat)\n else:\n x = r * np.cos(lat) * np.cos(lon)\n y = r * np.cos(lat) * np.sin(lon)\n z = r * np.sin(lat)\n\n return x, y, z", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def polar_embedding(self):\n self.isomap_r, self.isomap_theta = coord_polar(self.isomap)\n\n return self", "def _get_polar_sky_coords(self, x0, y0):\n x_sky, y_sky = self._get_cart_sky_coords(x0, y0)\n return np.hypot(y_sky, x_sky), np.arctan2(x_sky, y_sky)", "def spherical_to_cartesian(self, r, phi, theta):\n x = r*cos(phi)*sin(theta)\n y = r*sin(phi)*sin(theta)\n z = r*cos(theta)\n \n return Vector(float(x), float(y), float(z))", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def get_cartesian_coords(self):\n r = 1\n dec = self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def sphericalToCartesian(magnitude, azimuthal, polar):\r\n azimuthal = azimuthal*math.pi/180.0\r\n polar = polar*math.pi/180.0\r\n xval = magnitude * math.sin(azimuthal) * math.cos(polar)\r\n yval = magnitude * math.sin(azimuthal) * math.sin(polar)\r\n zval = magnitude * math.cos(azimuthal)\r\n return [xval, yval, zval]", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def getCartesian(self, phi, theta, radius):\n point_x = round(sin(theta) * cos(phi) * radius,4)\n point_y = round(sin(theta) * sin(phi) * radius,4)\n point_z = round(cos(theta) * radius,4)\n return [point_x, point_y, point_z]", "def to_polar(self, physics=False):\n if self.__coordsys in (Cartesian, Cartesian_3):\n self.__coordsys = Polar if self.__coordsys == Cartesian \\\n else PhySpherical if physics else MathSpherical\n self.update_coord(vct.pol(self.list_repr()))", "def cartesian(position):\n return [position[0] * cos(position[1]), 
position[0] * sin(position[1])]" ]
[ "0.693107", "0.6905906", "0.6819338", "0.6803397", "0.67851555", "0.6774124", "0.66581005", "0.66437465", "0.65992516", "0.657186", "0.65377104", "0.6503731", "0.6501307", "0.64989936", "0.6482384", "0.64411527", "0.64346915", "0.64143777", "0.6405472", "0.6399689", "0.6361283", "0.63588256", "0.63438874", "0.6337026", "0.63228893", "0.6284321", "0.627702", "0.6266774", "0.6260132", "0.61969495" ]
0.8749414
0
Renders scene in OpenGL. The main axes and bounding box of the world are always drawn, but the visualization of the world data depends on the current drawing mode selected by the user
def drawScene(self): glBegin(GL_LINES) # draw axes glColor3f(1, 0, 0) glVertex3f(0, 0, 0) glVertex3f(self.worldSize / 2, 0, 0) glColor3f(0, 1, 0) glVertex3f(0, 0, 0) glVertex3f(0, self.worldSize / 2, 0) glColor3f(0, 0, 1) glVertex3f(0, 0, 0) glVertex3f(0, 0, self.worldSize / 2) # draw bounding box glColor3f(1, 1, 1) scalar = (self.worldSize - 1) / 2 for x in [-1, 1]: for y in [-1, 1]: for z in [-1, 1]: glVertex3f(scalar * x, scalar * y, scalar * z) for z in [-1, 1]: for x in [-1, 1]: for y in [-1, 1]: glVertex3f(scalar * x, scalar * y, scalar * z) for y in [-1, 1]: for z in [-1, 1]: for x in [-1, 1]: glVertex3f(scalar * x, scalar * y, scalar * z) glEnd() # draw spheres if in POINTS mode if self.displayMode is self.DISPLAYMODE_POINTS: prev = (0, 0, 0) offset = int(self.worldSize / 2) for x in range(self.worldSize): for y in range(self.worldSize): for z in range(self.worldSize): glTranslatef(x - offset - prev[0], y - offset - prev[1], z - offset - prev[2]) # use threshold for black/white coloring if self.world[x][y][z] > self.worldThreshold: glColor3f(1, 1, 1) else: glColor3f(0, 0, 0) gluSphere(self.sphere, 0.1, 8, 4) prev = (x - offset, y - offset, z - offset) # draw mesh if in MESH mode elif self.displayMode is self.DISPLAYMODE_MESH: offset = int(self.worldSize / 2) for x in range(self.worldSize - 1): for y in range(self.worldSize - 1): for z in range(self.worldSize - 1): if self.polygons[x][y][z]: glBegin(GL_POLYGON) glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize) for vertex in self.polygons[x][y][z]: glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset) glEnd() # draw wireframe in in WIRE mode elif self.displayMode is self.DISPLAYMODE_WIREFRAME: offset = int(self.worldSize / 2) for x in range(self.worldSize - 1): for y in range(self.worldSize - 1): for z in range(self.worldSize - 1): glBegin(GL_LINES) glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize) for vertex in self.polygons[x][y][z]: glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset) glEnd() # draw background in the distance glLoadIdentity() glBegin(GL_QUADS) glColor3f(59 / 256, 102 / 256, 212 / 256) glVertex3f(-30, -23, -49.5) glVertex3f(30, -23, -49.5) glColor3f(184 / 256, 201 / 256, 242 / 256) glVertex3f(30, 23, -49.5) glVertex3f(-30, 23, -49.5) glEnd() # HUD in white glColor3f(1, 1, 1) # lower left glWindowPos2f(10, 10) for ch in 'WASD: Rotate': glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch)) glWindowPos2f(10, 25) for ch in 'Wheel: Thresh': glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch)) glWindowPos2f(10, 40) for ch in 'R: Randomize': glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch)) glWindowPos2f(10, 55) for ch in 'O: Object': glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch)) glWindowPos2f(10, 70) for ch in 'I: Wireframe': glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch)) glWindowPos2f(10, 85) for ch in 'P: Points': glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch)) # upper right glWindowPos2f(self.displaySize[0] - 118, self.displaySize[1] - 25) for ch in 'Thresh: %0.2f' % self.worldThreshold: glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def paintGL(self):\n self._sceneviewer.renderScene()\n # paintGL end", "def draw_scene():\n # Place the camera\n camera.placeCamera()\n \n \n # Set up the global ambient light. (Try commenting out.)\n amb = [ 0*brightness, 0*brightness, 0*brightness, 1.0 ]\n glLightModelfv(GL_LIGHT_MODEL_AMBIENT, amb)\n\n # Set up the main light (LIGHT0)... or not.\n if is_light_on:\n place_blue_light()\n place_red_light()\n place_green_light()\n place_lamp_light()\n else:\n glDisable(GL_LIGHT0)\n glDisable(GL_LIGHT1)\n glDisable(GL_LIGHT2)\n glDisable(GL_LIGHT3)\n\n if lamp_light:\n place_lamp_light()\n else:\n glDisable(GL_LIGHT3)\n\n if headlamp_is_on:\n place_headlamp_light()\n else:\n glDisable(GL_LIGHT4)\n\n # Now spin the world around the y-axis (for effect).\n glRotated(angle_movement, 0, 1, 0)\n draw_objects()", "def render(self):\n draw.filled_rect(self.x, self.y, self.w, self.h, (0.0,0.0,0.0,1.))\n self._render_scrollbar(self.x, self.y + 1, self.w, 8)\n\n # render oscilloscope window edge\n gl.glPushMatrix()\n gl.glTranslatef(.5,.5,0.)\n gl.glLineWidth(1.)\n draw.rect(self.x, self.y, self.w, self.h, (0.6,0.6,0.6,1.))\n gl.glPopMatrix()\n\n gl.glPushMatrix()\n x, y, w, h = self._raw_graph_window_dim()\n xx, yy = self.parent.gl_coordinates(x, y)\n gl.glScissor(int(xx), int(yy - h), int(w), int(h))\n gl.glEnable(gl.GL_SCISSOR_TEST)\n #print \"sy1 %.2f sy2 %.2f sy2 - sy1 %.2f\" % (self.sy1, self.sy2, self.sy2 - self.sy1)\n self.graph_renderer.render(x, y, w, h, self.sx1, self.sy1, self.sx2 - self.sx1, self.sy2 - self.sy1)\n gl.glDisable(gl.GL_SCISSOR_TEST)\n gl.glPopMatrix()\n\n if self.render_legend:\n x, y = self.render_legend_pos\n self._render_legend(self.x + x, self.y + y)", "def redraw(self):\n self.appInit()\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n scene = self.scenes[self.current_scene]\n parallel = scene.projection_type()\n angle, ratio, near, far = scene.perspective()\n print angle, ratio, near, far\n if parallel:\n left, right, bottom, top, near, far = scene.ortho()\n glOrtho(left, right, bottom, top, near, far)\n else:\n left, right, bottom, top, near, far = scene.frustum()\n print left, right, bottom, top, near, far\n glFrustum(left, right, bottom, top, near, far)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n look_at = scene.look_at()\n\n print ';;', look_at[0], look_at[1], look_at[2], look_at[3], look_at[4], look_at[5], look_at[6], look_at[7], look_at[8]\n gluLookAt(look_at[0], look_at[1], look_at[2],\n look_at[3], look_at[4], look_at[5],\n look_at[6], look_at[7], look_at[8])\n glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\n if self.draw_axes:\n self.draw_coordinate_axes()\n scene.redraw()\n self.SwapBuffers()\n glFlush()", "def display():\n # Set the viewport to the full screen.\n glViewport(0, 0, win_width, win_height)\n\n camera.setProjection()\n \n # Clear the Screen.\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Set the shading model we want to use.\n glShadeModel(GL_SMOOTH if use_smooth else GL_FLAT)\n\n # Draw and show the \"Scene\".\n draw_scene()\n glFlush()\n glutSwapBuffers()", "def render(self, scene):\n if self.radius == 0:\n return\n\n self.init_model(scene)\n\n coverage_levels = [10, 30, 90, 250, 450]\n lod = self.lod_adjust(scene, coverage_levels, self.pos, self.radius)\n\n length = self.axis.mag()\n gl.glPushMatrix()\n self.model_world_transform(scene.gcf, Vector([length, self.radius,\n self.radius])).gl_mult()\n\n self.color.gl_set(self.opacity)\n if self.translucent:\n 
gl.glEnable(gl.GL_CULL_FACE)\n\n # Render the back half.\n gl.glCullFace(gl.GL_FRONT)\n scene.cone_model[lod].gl_render()\n\n # Render the front half.\n gl.glCullFace(gl.GL_BACK)\n scene.cone_model[lod].gl_render()\n else:\n scene.cone_model[lod].gl_render()\n gl.glPopMatrix()", "def init_gl(self):\n size = self.GetClientSize()\n self.SetCurrent(self.context)\n GL.glDrawBuffer(GL.GL_BACK)\n GL.glClearColor(1.0, 1.0, 1.0, 0.0)\n GL.glViewport(0, 0, size.width, size.height)\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glLoadIdentity()\n GL.glOrtho(0, size.width, 0, size.height, -1, 1)\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glLoadIdentity()\n GL.glTranslated(self.pan_x, self.pan_y, 0.0)\n GL.glScaled(self.zoom, self.zoom, self.zoom)", "def redraw(self):\n self.appInit()\n scene = self.scenes[self.current_scene]\n angle, ratio, near, far = scene.perspective()\n # print angle, ratio, near, far\n eye_x, eye_y, eye_z, look_x, look_y, look_z, up_x, up_y, up_z = scene.look_at()\n # print eye_x, eye_y, eye_z, look_x, look_y, look_z, up_x, up_y, up_z\n\n glMatrixMode(GL_PROJECTION)\n gluPerspective(angle, ratio, near, far)\n glMatrixMode(GL_MODELVIEW)\n gluLookAt(eye_x, eye_y, eye_z, look_x, look_y, look_z, up_x, up_y, up_z)\n\n glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\n\n if self.draw_axes:\n self.draw_coordinate_axes()\n\n scene.redraw()\n self.SwapBuffers()", "def init_gl(self):\n size = self.GetClientSize()\n self.SetCurrent(self.context)\n\n GL.glViewport(0, 0, size.width, size.height)\n\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glLoadIdentity()\n GLU.gluPerspective(45, size.width / size.height, 10, 10000)\n\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glLoadIdentity() # lights positioned relative to the viewer\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_AMBIENT, self.no_ambient)\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_DIFFUSE, self.med_diffuse)\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_SPECULAR, self.no_specular)\n GL.glLightfv(GL.GL_LIGHT0, GL.GL_POSITION, self.top_right)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_AMBIENT, self.no_ambient)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_DIFFUSE, self.dim_diffuse)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_SPECULAR, self.no_specular)\n GL.glLightfv(GL.GL_LIGHT1, GL.GL_POSITION, self.straight_on)\n\n GL.glMaterialfv(GL.GL_FRONT, GL.GL_SPECULAR, self.mat_specular)\n GL.glMaterialfv(GL.GL_FRONT, GL.GL_SHININESS, self.mat_shininess)\n GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE,\n self.mat_diffuse)\n GL.glColorMaterial(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE)\n\n GL.glClearColor(1.0, 1.0, 1.0, 0.0)\n GL.glDepthFunc(GL.GL_LEQUAL)\n GL.glShadeModel(GL.GL_SMOOTH)\n GL.glDrawBuffer(GL.GL_BACK)\n GL.glCullFace(GL.GL_BACK)\n GL.glEnable(GL.GL_COLOR_MATERIAL)\n GL.glEnable(GL.GL_CULL_FACE)\n GL.glEnable(GL.GL_DEPTH_TEST)\n GL.glEnable(GL.GL_LIGHTING)\n GL.glEnable(GL.GL_LIGHT0)\n GL.glEnable(GL.GL_LIGHT1)\n GL.glEnable(GL.GL_NORMALIZE)\n\n # Viewing transformation - set the viewpoint back from the scene\n GL.glTranslatef(0.0, 0.0, -self.depth_offset)\n\n # Modelling transformation - pan, zoom and rotate\n GL.glTranslatef(self.pan_x, self.pan_y, 0.0)\n GL.glMultMatrixf(self.scene_rotate)\n GL.glScalef(self.zoom, self.zoom, self.zoom)", "def render(self):\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY);\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY);\n gl.glVertexPointer(3, gl.GL_DOUBLE, 0, self._vertices);\n gl.glNormalPointer(gl.GL_DOUBLE, 0, self._normals);\n for patch in self._patches:\n patch.render()\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY);\n gl.glDisableClientState(gl.GL_NORMAL_ARRAY);", 
"def initializeGL(self):\n # background color\n gl.glClearColor(0,0,0,0)\n gl.glViewport(0, 0, self.width, self.height)\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()", "def render(self):\n GL.glColor(*self._color)\n\n GL.glLoadIdentity()\n GL.glTranslate(self._x, self._y, 0)\n\n GL.glBegin(GL.GL_QUADS)\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(self._width, 0, 0)\n GL.glVertex3f(self._width, self._height, 0)\n GL.glVertex3f(0, self._height, 0)\n GL.glEnd()", "def init_gl(self):\n\n # default background color is white-ish\n background = [.99, .99, .99, 1.0]\n # if user passed a background color use it\n if 'background' in self.kwargs:\n try:\n # convert to (4,) uint8 RGBA\n background = to_rgba(self.kwargs['background'])\n # convert to 0.0 - 1.0 float\n background = background.astype(np.float64) / 255.0\n except BaseException:\n log.error('background color wrong!',\n exc_info=True)\n # apply the background color\n gl.glClearColor(*background)\n\n max_depth = (np.abs(self.scene.bounds).max(axis=1) ** 2).sum() ** .5\n max_depth = np.clip(max_depth, 500.00, np.inf)\n gl.glDepthRange(0.0, max_depth)\n\n gl.glClearDepth(1.0)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glDepthFunc(gl.GL_LEQUAL)\n\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glEnable(gl.GL_LIGHTING)\n gl.glEnable(gl.GL_LIGHT0)\n gl.glEnable(gl.GL_LIGHT1)\n\n # put the light at one corner of the scenes AABB\n gl.glLightfv(gl.GL_LIGHT0,\n gl.GL_POSITION,\n rendering.vector_to_gl(np.append(self.scene.bounds[1], 0)))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_SPECULAR,\n rendering.vector_to_gl(.5, .5, 1, 1))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,\n rendering.vector_to_gl(1, 1, 1, .75))\n gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,\n rendering.vector_to_gl(.1, .1, .1, .2))\n\n gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)\n gl.glEnable(gl.GL_COLOR_MATERIAL)\n gl.glShadeModel(gl.GL_SMOOTH)\n\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_AMBIENT,\n rendering.vector_to_gl(0.192250, 0.192250, 0.192250))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_DIFFUSE,\n rendering.vector_to_gl(0.507540, 0.507540, 0.507540))\n gl.glMaterialfv(gl.GL_FRONT,\n gl.GL_SPECULAR,\n rendering.vector_to_gl(.5082730, .5082730, .5082730))\n\n gl.glMaterialf(gl.GL_FRONT,\n gl.GL_SHININESS,\n .4 * 128.0)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n\n gl.glLineWidth(1.5)\n gl.glPointSize(4)", "def render(self, mode='human'):\n\n if self.RENDER_ENV_ONLY:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=np.array([120, 120, 120])/255.0)\n bezel = 10\n \n self._env_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n self._agent_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n if (self.RENDER_INDIV_MEMORY == True and self.INDIV_MEMORY == \"fog\") or (self.RENDER_TEAM_MEMORY == True and self.TEAM_MEMORY == \"fog\"):\n SCREEN_W = 1200\n SCREEN_H = 600\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, 
SCREEN_H)\n \n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n\n self._env_render(self._static_map,\n [7, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [7+1.49*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_red_render,\n [7+1.49*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [7, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # ind blue agent memory rendering\n for num_blue, blue_agent in enumerate(self._team_blue):\n if num_blue < 2:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+num_blue*SCREEN_H//4, 7], [SCREEN_H//4-10, SCREEN_H//4-10])\n else:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+(num_blue-2)*SCREEN_H//4, 7+SCREEN_H//4], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n # ind red agent memory rendering\n for num_red, red_agent in enumerate(self._team_red):\n if num_red < 2:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+num_red*SCREEN_H//4, 7+1.49*SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n \n else:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+(num_red-2)*SCREEN_H//4, 7+SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n if self.TEAM_MEMORY == \"fog\" and self.RENDER_TEAM_MEMORY == True:\n # blue team memory rendering\n blue_visited = np.copy(self._static_map)\n blue_visited[self.blue_memory] = UNKNOWN\n self._env_render(blue_visited,\n [7+2.98*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # red team memory rendering \n red_visited = np.copy(self._static_map)\n red_visited[self.red_memory] = UNKNOWN\n self._env_render(red_visited,\n [7+2.98*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n else:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n \n self._env_render(self._static_map,\n [5, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10], self._team_blue)\n self._env_render(self.get_obs_red_render,\n [5+SCREEN_W//2, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n\n if self.SILENCE_RENDER:\n return self.viewer.get_array()\n else:\n return self.viewer.render(return_rgb_array = mode=='rgb_array')", "def init(width, height):\n\tglClearColor(0.0, 0.0, 1.0, 0.0) #blue bg\n\tglMatrixMode(GL_PROJECTION)\n\tglLoadIdentity()\n\tglOrtho(-0.5, 2.5, -1.5, 1.5, -1.0, 1.0)", "def on_draw():\n window.clear()\n 
world.draw()", "def draw(self):\n self.scene.draw(self.screen)", "def drawSimple(self, screen):\r\n self.worlds[0].renderer.render(screen)", "def view_draw(self, context):\n self.override_context = context.copy()\n region = context.region\n view = context.region_data\n\n vmat = view.view_matrix.copy()\n vmat_inv = vmat.inverted()\n pmat = view.perspective_matrix * vmat_inv\n\n viewport = [region.x, region.y, region.width, region.height]\n\n self.update_view(vmat, pmat, viewport)\n\n glPushAttrib(GL_ALL_ATTRIB_BITS)\n\n glDisable(GL_DEPTH_TEST)\n glDisable(GL_CULL_FACE)\n glDisable(GL_STENCIL_TEST)\n glEnable(GL_TEXTURE_2D)\n\n glClearColor(0, 0, 1, 1)\n glClear(GL_COLOR_BUFFER_BIT)\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n\n glActiveTexture(GL_TEXTURE0)\n glBindTexture(GL_TEXTURE_2D, self.tex)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, self.width, self.height, 0, GL_RGB,\n GL_UNSIGNED_BYTE, self.processor.image_buffer)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\n glBegin(GL_QUADS)\n glColor3f(1.0, 1.0, 1.0)\n glTexCoord2f(0.0, 0.0)\n glVertex3i(-1, -1, 0)\n glTexCoord2f(1.0, 0.0)\n glVertex3i(1, -1, 0)\n glTexCoord2f(1.0, 1.0)\n glVertex3i(1, 1, 0)\n glTexCoord2f(0.0, 1.0)\n glVertex3i(-1, 1, 0)\n glEnd()\n\n glPopMatrix()\n glMatrixMode(GL_MODELVIEW)\n glPopMatrix()\n\n glPopAttrib()", "def draw( self ):\r\n print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. 
Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"", "def appInit(self):\n glMatrixMode( GL_PROJECTION )\n glLoadIdentity()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n self.set_lighting()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def draw(self, projection, view, _model, **_kwargs):\n\n shid = self.skinning_shader.glid\n GL.glUseProgram(shid)\n\n # setup camera geometry parameters\n loc = GL.glGetUniformLocation(shid, 'projection')\n GL.glUniformMatrix4fv(loc, 1, True, projection)\n loc = GL.glGetUniformLocation(shid, 'view')\n GL.glUniformMatrix4fv(loc, 1, True, view)\n # bone world transform matrices need to be passed for skinning\n for bone_id, node in enumerate(self.bone_nodes):\n bone_matrix = node.world_transform @ self.bone_offsets[bone_id]\n\n bone_loc = GL.glGetUniformLocation(shid, 'boneMatrix[%d]' % bone_id)\n GL.glUniformMatrix4fv(bone_loc, 1, True, bone_matrix)\n\n # draw mesh vertex array\n self.vertex_array.draw(GL.GL_TRIANGLES)\n\n # leave with clean OpenGL state, to make it easier to detect problems\n GL.glUseProgram(0)", "def render(self):\n self.SetCurrent(self.context)\n if not self.init:\n # Configure the OpenGL rendering context\n self.init_gl()\n self.init = True\n\n # Clear everything\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n for device_id, output_id in self.monitors.monitors_dictionary:\n i = list(\n self.monitors.monitors_dictionary.keys()).index(\n (device_id, output_id))\n GL.glColor3f(self.colours[i%8][0], self.colours[i%8][1], self.colours[i%8][2])\n x = (i - len(self.monitors.monitors_dictionary)/2)*20\n signal_list = self.monitors.monitors_dictionary[(\n device_id, output_id)]\n\n signal_name = self.devices.get_signal_name(device_id, output_id)\n\n self.render_text(signal_name, x, 0, 10*len(signal_list)+10)\n\n length = len(signal_list)\n for j in range(length):\n signal = signal_list[j]\n z = (j-length/2)*20\n if signal == self.devices.HIGH:\n y = 11\n if signal == self.devices.LOW:\n y = 1\n if signal != self.devices.BLANK:\n self.draw_cuboid(x, z, 5, 10, y)\n\n # We have been drawing to the back buffer, flush the graphics pipeline\n # and swap the back buffer to the front\n GL.glFlush()\n self.SwapBuffers()", "def Render(self, mode):\n\n shaders.glUseProgram(self.shader)\n try:\n self.vbo.bind()\n try:\n glEnableClientState(GL_VERTEX_ARRAY)\n GLVertexPointer(self.vbo)\n glDrawArrays(GL_TRIANGLES, 0, 9)\n finally:\n self.vbo.unbind()\n glDisableClientState(GL_VERTEX_ARRAY)\n finally:\n shaders.glUseProgram(0)", "def render(self):\n glPushMatrix()\n glMultMatrixf(np.transpose(self.translation_matrix))\n glMultMatrixf(self.scaling_matrix)\n color = color.COLORS[self.color_index]\n 
glColor3f(color[0], color[1], color[2])\n\n if self.selected:\n # Emit light\n glMaterialfv(GL_FRONT, GL_EMISSION, [0.0, 0.0, 0.0])\n\n glPopMatrix()", "def renderScene(self, frame, model, rects=False):\n \n # Get Coordinates\n eyeRects = model.getEyeRects();\n faceRect = model.getFaceRect();\n linePoints = model.getEyeLine();\n \n # Draw Shapes and display frame\n self.drawLine(frame, linePoints[0],linePoints[1],(0, 0, 255));\n self.drawRectangle(frame, faceRect, (0, 0, 255));\n self.drawRectangle(frame, eyeRects[0], (0, 255, 0));\n self.drawRectangle(frame, eyeRects[1], (0, 255, 0));\n \n if rects is not False:\n self.drawRectangle(frame, rects['eyeLeft'], (152,251,152));\n self.drawRectangle(frame, rects['eyeRight'],(152,251,152));\n \n cv2.imshow(\"Video\", frame);", "def render(self, camera=None):\r\n glPushMatrix()\r\n x,y,z = self.pos\r\n glTranslatef(x,y,-z)\r\n a, b, c = self.rotation\r\n glRotatef(a, 1, 0, 0)\r\n glRotatef(b, 0, 1, 0)\r\n glRotatef(c, 0, 0, 1)\r\n try:\r\n glScalef(*self.scale)\r\n except:\r\n glScalef(self.scale, self.scale, self.scale)\r\n glColor(*self.colorize)\r\n\r\n if self.outline:\r\n misc.outline(misc.OutlineGroup([i[0] for i in self.gl_lists]),\r\n self.outline_color, self.outline_size)\r\n\r\n for i in self.gl_lists:\r\n i[1].bind()\r\n i[0].render()\r\n glPopMatrix()", "def initializeGL(self):\n glClearColor(0, 0, 0, 1.0)\n # chk this out -> this clears the screen with black whereas in other programs\n # 0.9,0.9,1.0,1.0 clear the screen to black\n\n glClearDepth(1.0)\t\n glDepthFunc(GL_LESS)\t\n glEnable(GL_DEPTH_TEST)\t\n glShadeModel(GL_SMOOTH)\t\n\n #glViewport(self.viewportX, self.viewportY, self.viewportWidth, self.viewportY)\n #glViewport(0,0, self.width, self.height)\n \n # Reset The Current Viewport And Perspective Transformation\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n\n glOrtho(self.initX, self.width, self.initY, self.height, 0, 100)\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()", "def appInit(self):\n glutInitDisplayMode( GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH )\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0 )\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n glEnable( GL_LIGHTING )\n glEnable( GL_LIGHT0 )\n\n self.set_lighting()\n\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def create_scene(self):\n \n self.scene=soya.World()" ]
[ "0.7506204", "0.74406266", "0.7282392", "0.7179098", "0.70508254", "0.69576925", "0.69117737", "0.6708232", "0.6666103", "0.663896", "0.6623787", "0.6541221", "0.6501028", "0.64759177", "0.64395064", "0.6425605", "0.64179164", "0.64148545", "0.6398485", "0.638504", "0.6370898", "0.63703656", "0.6357739", "0.63551223", "0.63509285", "0.63361275", "0.6299752", "0.62628883", "0.62560475", "0.6253325" ]
0.8281561
0
Perform MarchingCubesPolygons algorithm across the worldspace to generate an array of polygons to plot
def findPolygons(self): # perform marching cubes algorithm for x in range(self.worldSize - 1): for y in range(self.worldSize - 1): for z in range(self.worldSize - 1): # format values for entry values = [self.world[x][y][z], self.world[x + 1][y][z], self.world[x + 1][y + 1][z], self.world[x][y + 1][z], self.world[x][y][z + 1], self.world[x + 1][y][z + 1], self.world[x + 1][y + 1][z + 1], self.world[x][y + 1][z + 1]] # perform marchine cubes self.polygons[x][y][z] = marchingCubesPolygons(values, self.worldThreshold)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generatePolygons():", "def marchingCubesPolygons(values, threshold):\n # define vertices of cube in (x,y,z) coordinates\n VERTICES = [\n (0, 0, 0),\n (1, 0, 0),\n (1, 1, 0),\n (0, 1, 0),\n (0, 0, 1),\n (1, 0, 1),\n (1, 1, 1),\n (0, 1, 1)]\n # define edges of cube as combination of two vertices\n EDGES = [\n (0, 1),\n (1, 2),\n (2, 3),\n (0, 3),\n (0, 4),\n (1, 5),\n (2, 6),\n (3, 7),\n (4, 5),\n (5, 6),\n (6, 7),\n (4, 7)]\n\n activeEdges = [] # list of active edges\n polygonVertices = [] # list of vertices to drwa\n\n # determine which edges are active\n for edge in EDGES:\n # edge is active if it straddles a threshold crossing\n if ((values[edge[0]] > threshold) != (values[edge[1]] > threshold)):\n activeEdges.append(edge)\n\n # create array of vertices for polygon as midpoints of edges\n for edge in activeEdges:\n midpoint = tuple((a + b) / 2 for a, b in zip(VERTICES[edge[0]], VERTICES[edge[1]]))\n polygonVertices.append(midpoint)\n\n # sort array of polygon vertices by distance to one another\n for index in range(len(polygonVertices)):\n a = polygonVertices[index]\n polygonVertices[index + 1:] = sorted(polygonVertices[index + 1:],\n key=lambda item: ((item[0] - a[0]) ** 2 + (item[1] - a[1]) ** 2 + (\n item[2] - a[2]) ** 2) ** (1 / 2)\n )\n\n return polygonVertices", "def _make_collections(polygons, opacity=1):\n collection = []\n for color in polygons:\n collection.append(\n Poly3DCollection(\n [p.points_matrix for p in polygons[color]],\n alpha=opacity,\n facecolor=color,\n edgecolors=\"black\",\n )\n )\n return collection", "def _make_collections(polygons, opacity=1):\n collection = []\n for color in polygons:\n collection.append(Poly3DCollection(\n [p.points_matrix for p in polygons[color]],\n alpha=opacity,\n facecolor=color,\n edgecolors='black'))\n return collection", "def render_solid_3d(self, **kwds):\n return sum([ polygon3d(self.coordinates_of(f), **kwds) \n for f in self.polygons ])", "def _transform_polygons(self, polygons):\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if isinstance(polygons, dict):\n out_polygons = {}\n for kk in polygons.keys():\n out_polygons[kk] = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for points in polygons[kk]:\n if self.magnification:\n out_polygons[kk].append(points * mag + spc)\n else:\n out_polygons[kk].append(points + spc)\n if self.x_reflection:\n out_polygons[kk][-1] = out_polygons[kk][-1] * xrefl\n if self.rotation is not None:\n out_polygons[kk][-1] = (\n out_polygons[kk][-1] * ct\n + out_polygons[kk][-1][:, ::-1] * st\n )\n if self.origin is not None:\n out_polygons[kk][-1] = out_polygons[kk][-1] + orgn\n else:\n out_polygons = []\n for ii in range(self.columns):\n for jj in range(self.rows):\n spc = numpy.array([self.spacing[0] * ii, self.spacing[1] * jj])\n for points in polygons:\n if self.magnification is not None:\n out_polygons.append(points * mag + spc)\n else:\n out_polygons.append(points + spc)\n if self.x_reflection:\n out_polygons[-1] = out_polygons[-1] * xrefl\n if self.rotation is not None:\n out_polygons[-1] = (\n out_polygons[-1] * ct + out_polygons[-1][:, ::-1] * st\n )\n if self.origin is not None:\n 
out_polygons[-1] = out_polygons[-1] + orgn\n return out_polygons", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, 
B4, B5, B6, B7, B8]", "def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'", "def view_polygons(polygons):\n # create the figure and add the surfaces\n plt.figure()\n ax = plt.axes(projection='3d')\n\n collections = _make_collections(polygons, opacity=0.5)\n\n for c in collections:\n ax.add_collection3d(c)\n\n # calculate and set the axis limits\n limits = _get_limits(polygons=polygons)\n ax.set_xlim(limits['x'])\n ax.set_ylim(limits['y'])\n ax.set_zlim(limits['z'])\n\n plt.show()", "def view_polygons(polygons):\n # create the figure and add the surfaces\n plt.figure()\n ax = plt.axes(projection=\"3d\")\n\n collections = _make_collections(polygons, opacity=0.5)\n\n for c in collections:\n ax.add_collection3d(c)\n\n # calculate and set the axis limits\n limits = _get_limits(polygons=polygons)\n ax.set_xlim(limits[\"x\"])\n ax.set_ylim(limits[\"y\"])\n ax.set_zlim(limits[\"z\"])\n\n plt.show()", "def _write_polygons(\n self,\n shapes: Iterable[Polygon],\n emissions: Iterable[float],\n info: EmissionInfo,\n source_group: int,\n ):\n\n # Rasterize the polygon on a grid\n shapes_serie = gpd.GeoSeries(shapes)\n # get polygon bounds\n minx, miny, maxx, maxy = shapes_serie.total_bounds\n # Create a grid for the rasterization\n x = np.arange(minx, maxx, self.polygon_raster_size)\n y = np.arange(miny, maxy, self.polygon_raster_size)\n\n # Get the emission per cell\n average_cells_proportion = (self.polygon_raster_size**2) / shapes_serie.area\n cell_emissions = np.array(emissions) * average_cells_proportion\n\n # WARNING: this might be not exactly mass convserving\n rasterized_emissions = rasterize(\n shapes=zip(shapes, cell_emissions),\n out_shape=(len(x), len(y)),\n transform=from_bounds(minx, miny, maxx, maxy, len(x), len(y)),\n all_touched=False,\n merge_alg=MergeAlg.add,\n )[\n ::-1, :\n ] # flip the y axis\n\n # Get the coordinates of the rasterized polygon\n indices = np.array(np.where(rasterized_emissions)).T\n\n # Write the polygon\n with open(self.file_cadastre, \"a\") as f:\n for i_x, i_y in indices:\n f.write(\n f\"{x[i_x]},{y[i_y]},{info.height},\"\n f\"{self.polygon_raster_size},{self.polygon_raster_size},{info.vertical_extension},\"\n f\"{rasterized_emissions[i_x, i_y]},0,0,0,{source_group},\\n\"\n )", "def get_polygonsets(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n polygonsets = self.ref_cell.get_polygonsets(depth=depth)\n array = []\n for i in range(self.columns):\n for j in range(self.rows):\n spc = numpy.array([self.spacing[0] * i, self.spacing[1] * j])\n for polygonset in polygonsets:\n ps = libcopy.deepcopy(polygonset)\n for ii in range(len(ps.polygons)):\n if self.magnification is not None:\n ps.polygons[ii] = ps.polygons[ii] * mag + spc\n else:\n ps.polygons[ii] = ps.polygons[ii] + spc\n if self.x_reflection:\n ps.polygons[ii] = ps.polygons[ii] * xrefl\n if self.rotation is not None:\n ps.polygons[ii] = (\n ps.polygons[ii] 
* ct + ps.polygons[ii][:, ::-1] * st\n )\n if self.origin is not None:\n ps.polygons[ii] = ps.polygons[ii] + orgn\n array.append(ps)\n return array", "def get_polygonsets(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n polygonsets = self.ref_cell.get_polygonsets(depth=depth)\n for ps in polygonsets:\n for ii in range(len(ps.polygons)):\n if self.x_reflection:\n ps.polygons[ii] = ps.polygons[ii] * xrefl\n if self.magnification is not None:\n ps.polygons[ii] = ps.polygons[ii] * mag\n if self.rotation is not None:\n ps.polygons[ii] = (\n ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st\n )\n if self.origin is not None:\n ps.polygons[ii] = ps.polygons[ii] + orgn\n return polygonsets", "def create_climatology(self):\n # Because data from all years are merged, the time coordinate must be \n # made consistent with basic integer values. Monthly dates values are \n # added to the attributes.\n time_points = self.cubelist[0].coord(self.time_coord).points\n new_time_points = range(1, len(time_points) + 1)\n new_time_atts = {'dates' : self.cube_dates}\n new_time_coord = iris.coords.DimCoord(new_time_points,\n standard_name=self.time_coord,\n attributes=new_time_atts)\n \n new_cubelist = []\n realization_num = 1\n for cube in self.cubelist:\n if len(cube.coord(self.realization).points) > 1:\n cube = self.cube_ensemble_mean(cube)\n # Make sure all realization points are unique.\n cube.coord(self.realization).points = [realization_num]\n # Replace time dimension.\n time_dim = cube.coord_dims(cube.coord(self.time_coord))\n cube.remove_coord(self.time_coord)\n if time_dim:\n cube.add_dim_coord(new_time_coord, time_dim)\n else:\n # If no time_dim, coordinate is auxiliary or scalar.\n cube.add_aux_coord(new_time_coord)\n \n new_cubelist.append(cube)\n realization_num += 1\n \n new_cube = iris.cube.CubeList(new_cubelist).merge_cube()\n clim_cube = self.cube_ensemble_mean(new_cube)\n \n # The initialisation data is now a mean of all years, so like with time\n # replace the coordinate with a single point and monthly initialisation\n # dates added to the attributes.\n init_points = clim_cube.coord(self.forecast_ref_time).points\n new_init_points = range(1, len(init_points) + 1)\n new_init_atts = {'dates' : self.cube_init_dates}\n new_init_coord = iris.coords.AuxCoord(new_init_points,\n standard_name=self.forecast_ref_time,\n attributes=new_init_atts)\n clim_cube.remove_coord(self.forecast_ref_time)\n clim_cube.add_aux_coord(new_init_coord)\n \n self.clim_cube = clim_cube\n return self.clim_cube", "def compute_cspace(obstacle_polygons, vehicle_polygon):\n\n enlarged_obstacles = []\n for obstacle in obstacle_polygons:\n enlarged_obstacles.append(minkowski_sum_fast(obstacle, vehicle_polygon))\n \n # TODO: merge intersecting polygons into one.\n return enlarged_obstacles", "def create_climatology(self):\n # Because data from all years are merged, the time coordinate must be \n # made consistent with basic integer values. 
Monthly dates values are \n # added to the attributes.\n time_points = self.cubelist[0].coord(self.time_coord).points\n new_time_points = range(1, len(time_points) + 1)\n new_time_atts = {'dates' : self.cube_dates}\n new_time_coord = iris.coords.DimCoord(new_time_points,\n standard_name=self.time_coord,\n attributes=new_time_atts)\n \n # Create a years coordinate so they can be meaned.\n years_coord = iris.coords.AuxCoord(1, long_name='years')\n \n new_cubelist = []\n for this_cube in self.cubelist:\n cube = this_cube.copy()\n # Add year of earliest date.\n cube.add_aux_coord(years_coord)\n cube.coord('years').points = cube_time_converter(\n cube.coord('time').points[0], \n self.time_unit).year\n # Replace time dimension.\n time_dim = cube.coord_dims(cube.coord(self.time_coord))\n cube.remove_coord(self.time_coord)\n if time_dim:\n cube.add_dim_coord(new_time_coord, time_dim)\n else:\n # If no time_dim, coordinate is auxiliary or scalar.\n cube.add_aux_coord(new_time_coord)\n \n new_cubelist.append(cube.copy())\n \n new_cube = iris.cube.CubeList(new_cubelist).merge_cube()\n if len(new_cube.coord('years').points) > 1:\n new_cube = new_cube.collapsed('years', iris.analysis.MEAN)\n \n self.clim_cube = new_cube\n return self.clim_cube", "def polygons(self):\n if self.type == 'Polygon':\n polygons = [self._geojson['geometry']['coordinates']]\n elif self.type == 'MultiPolygon':\n polygons = self._geojson['geometry']['coordinates']\n return [ [ [_lat_lons_from_geojson(s) for\n s in ring ] for\n ring in polygon] for\n polygon in polygons]", "def grid_spherical_decomposed(x, y, z, data, x_i, y_i, z_i, horz_res, missing_value=-32767):\n\n r_map = np.sqrt(x**2.0 + y**2.0) # cartesian radius from map (x,y) center\n az_map = np.arctan2(y,x) #azimuth in the cartesian system. might vary along a ray due to map projection curvature\n vcp = np.fromiter((np.median(az_map[:, i_az, :]) for i_az in range(az_map.shape[1])), np.float32)\n print x.shape\n \n r_i = np.arange(r_map.min(), r_map.max(), horz_res) # cartesian radius from map(x,y) center\n\n # also need to griddata the x, y, z geographic coordinates.\n # decomposed geometry in radar polar coordinates is a not a\n # geophysical coordinate system (it's really a tangent plane\n # coord sys without beam refraction effects), so really there \n # are two xyz systems in play here.\n\n # unless, if by using z and R = np.sqrt(x**2.0 + y**2.0), we remain in a cylinderical \n # system referenced to the map projection in use. I think this is true.\n\n # Interpolate from spherical to cylindrical.\n # Cylindrical system is a different\n # range coordinate than the radar range coordinate.\n az_idx = 1\n cyl_grid_shape = (r_i.shape[0], x.shape[az_idx], z_i.shape[0])\n cyl_grid = np.empty(cyl_grid_shape)\n \n for az_id in range(cyl_grid_shape[az_idx]):\n progress(az_id, cyl_grid_shape[az_idx], 'Gridding along azimuths')\n rhi_r = r_map[:, az_id, :]\n # rhi_y = y[:, az_id, :]\n # R_i = rhir = np.sqrt(x[:, az_id, :]**2.0 + y[:, az_id, :]**2.0)\n rhi_z = z[:, az_id, :]\n rhi_data = data[:, az_id, :]\n \n # input and output coordinates need to be taken from the same coordinate system\n cyl_grid[:, az_id, :] = griddata(rhi_r.flatten(), rhi_z.flatten(), rhi_data.flatten(), r_i, z_i).T\n print \"\\r\" + 'Gridding along azimuths ... 
done'\n # cyl_grid is r, az, z instead of r, az, el\n \n # get mesh of coordinates for all interpolated radii r_i and along the azimuth\n # since constant radar azimuth might have curvature induced by the map projection\n # it's tricky to do this.\n\n # steps:\n # Do new transform from r,az radar system to map system using r=r_i to get x,y\n # or \n # Just do naive assumption that azimuths are straight and accept the error (used this one)\n \n # interpolate from cylindrical to cartesian.\n grid = np.empty((len(x_i), len(y_i), len(z_i)), dtype=np.float32)\n for z_id in range(z_i.shape[0]):\n progress(z_id, z_i.shape[0], 'Gridding at constant altitude')\n cappi_x = r_i[:, None]*np.cos(vcp[None, :])\n cappi_y = r_i[:, None]*np.sin(vcp[None, :])\n cappi_data = cyl_grid[:,:,z_id]\n \n # input and output coordinates need to be taken from the same coordinate system\n grid_2d = griddata(cappi_x.flatten(), cappi_y.flatten(), cappi_data.flatten(), x_i, y_i).T\n grid[:, :, z_id] = grid_2d\n print \"\\r\" + 'Gridding at constant altitude ... done'\n \n grid[np.isnan(grid)] = missing_value\n \n return grid", "def _transform_polygons(self, polygons):\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n if isinstance(polygons, dict):\n for kk in polygons.keys():\n for ii in range(len(polygons[kk])):\n if self.x_reflection:\n polygons[kk][ii] = polygons[kk][ii] * xrefl\n if self.magnification is not None:\n polygons[kk][ii] = polygons[kk][ii] * mag\n if self.rotation is not None:\n polygons[kk][ii] = (\n polygons[kk][ii] * ct + polygons[kk][ii][:, ::-1] * st\n )\n if self.origin is not None:\n polygons[kk][ii] = polygons[kk][ii] + orgn\n else:\n for ii in range(len(polygons)):\n if self.x_reflection:\n polygons[ii] = polygons[ii] * xrefl\n if self.magnification is not None:\n polygons[ii] = polygons[ii] * mag\n if self.rotation is not None:\n polygons[ii] = polygons[ii] * ct + polygons[ii][:, ::-1] * st\n if self.origin is not None:\n polygons[ii] = polygons[ii] + orgn\n return polygons", "def mlab_plt_cube(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = cube_faces(xmin, xmax, ymin, ymax, zmin, zmax)\n for grid in faces:\n x, y, z = grid\n mlab.mesh(x, y, z, opacity=0.1, color=(0.1, 0.2, 0.3))", "def CreateLandmask(Fieldset, test = False):\n \n \n \"\"\"\n This first set of lines creates a numpy array with u velocities and a numpy\n array with v velocities. First we get the U and V fields from the dataset. Then\n we compute a time chunk, which is needed because of the dataset. Then we only\n take the first slice of the U and V field (we do not need more for finding the land\n and ocean grids). As last we make an empty array which will be filled with zeros and \n ones.\n \"\"\"\n fU = Fieldset.U\n fV = Fieldset.V\n Fieldset.computeTimeChunk(fU.grid.time[0], 1) \n uvel_mask_c = fU.data[0,:,:] \n vvel_mask_c = fV.data[0,:,:]\n# vvel_mask_c = np.roll(vvel_mask_c, 1, axis = 0)\n landmask = np.zeros((uvel_mask_c.shape[0], uvel_mask_c.shape[1]))\n \n \"\"\"\n The first loop checks the value of the u and v velocitites. Notice that we get the\n values of two adjacent grid, since we're working with a C-grid.\n Visualizations of velocities in the C-grids(see below). 
So for a grid to be flagged identified\n as a land grid two U velocities and 2 V velocities need to be zero. The first loop makes all\n ocean grids 1 and land grids 0. \n ____ ____ ____ ____\n | V | V | \n | | | \n U T U T U\n | | | \n |____V____|_____V_____| \n \"\"\"\n \n for i in range (len(landmask[:,0])-1):\n for j in range (len(landmask[0,:])-1):\n u1 = uvel_mask_c[i,j]\n\n u2 = uvel_mask_c[i,j+1]\n\n v1 = vvel_mask_c[i,j]\n\n v2 = vvel_mask_c[i+1,j]\n\n if u1 != 0 or u2 != 0 or v1 != 0 or v2 != 0:\n landmask[i,j] = 1\n \n \n \"\"\"\n Change all zero to 1 and rest 0. since we want the land grids to be 1 and ocean\n grids to be 0. \n \"\"\"\n \n landmask = ChangeValues(landmask,0,1) \n \n \"\"\"\n The created landmask needs to be shifted upwards one grid. We will\n use the numpy roll function to do this.\n \"\"\"\n \n if test == True:\n plt.figure()\n plt.imshow(landmask)\n plt.colorbar()\n \n return landmask", "def _get_unstructured_collection(vlon, vlat, xm, vmin, vmax, basemap_object=None):\n\n #init\n Path = mpath.Path\n patches = []\n pdata = xm[0, :] * 1. # full list of data\n vmsk = np.ones_like(pdata).astype('bool') # mask to indicate which cells contain valid data\n\n for i in xrange(x.ncell):\n if np.any(vlon[i, :]) > 180.: # todo fix this properly !!!\n vmsk[i] = False\n continue\n if basemap_object is None:\n xv = vlon[i, :]\n yv = vlat[i, :]\n else:\n xv, yv = basemap_object(vlon[i, :], vlat[i, :]) # todo: how to properly deal with boundary problem ????\n if (vlon[i, :].min() < -100.) & (vlon[i, :].max() > 100.): # todo\n #... triangles across the boundaries of the projection are a problem\n # ... solution: generate two triangles ! TODO\n vmsk[i] = False\n continue\n\n verts = np.asarray([xv, yv]).T\n\n #--- specify how vertices are interconnected (here simple connection by lines)\n codes = [Path.MOVETO, Path.LINETO, Path.LINETO]\n\n #--- construct object and append to library of objects ---\n path = mpath.Path(verts, codes, closed=True)\n patches.append(mpatches.PathPatch(path))\n\n pdata = np.asarray(pdata)\n\n if vmin is None:\n vmin = pdata.min()\n if vmax is None:\n vmax = pdata.max()\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n collection = PatchCollection(patches, cmap=cmap, norm=norm, alpha=1., match_original=False, edgecolors='grey') # construct library of all objects\n collection.set_array(pdata[vmsk]) # assign data values here\n\n return collection", "def cube_faces(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = []\n\n x, y = np.mgrid[xmin:xmax:3j, ymin:ymax:3j]\n z = np.ones(y.shape) * zmin\n faces.append((x, y, z))\n\n x, y = np.mgrid[xmin:xmax:3j, ymin:ymax:3j]\n z = np.ones(y.shape) * zmax\n faces.append((x, y, z))\n\n x, z = np.mgrid[xmin:xmax:3j, zmin:zmax:3j]\n y = np.ones(z.shape) * ymin\n faces.append((x, y, z))\n\n x, z = np.mgrid[xmin:xmax:3j, zmin:zmax:3j]\n y = np.ones(z.shape) * ymax\n faces.append((x, y, z))\n\n y, z = np.mgrid[ymin:ymax:3j, zmin:zmax:3j]\n x = np.ones(z.shape) * xmin\n faces.append((x, y, z))\n\n y, z = np.mgrid[ymin:ymax:3j, zmin:zmax:3j]\n x = np.ones(z.shape) * xmax\n faces.append((x, y, z))\n\n return faces", "def makeup_polygons(\n draw: ImageDraw,\n num_cells: int,\n width: int,\n height: int,\n rgb_im: Image,\n random: bool,\n):\n voronoi, points = generate_voronoi_diagram(num_cells, width, height)\n for point, index in zip(points, voronoi.point_region):\n # Getting the region of the given point\n region = voronoi.regions[index]\n # Getting the points in arrays\n polygon = list()\n for i in region:\n # If 
vector is out of plot do not add\n if i != -1:\n polygon.append(voronoi.vertices[i])\n # Make tuples of the points\n polygon_tuples = list()\n for l in polygon:\n polygon_tuples.append(tuple(l))\n rgb = (0, 0, 0)\n if random:\n # Get random color\n rgb = random_color()\n else:\n # Get colors of the middle point\n rgb = get_color_of_point(point, rgb_im, width, height)\n # Draw the calculated polygon with the color of the middle point\n if polygon and polygon_tuples:\n draw.polygon(polygon_tuples, rgb)", "def test_cube(self):\n\n # No isosurface\n cube_zero = numpy.zeros((2, 2, 2), dtype=numpy.float32)\n\n result = marchingcubes.MarchingCubes(cube_zero, 1.)\n self.assertEqual(result.shape, cube_zero.shape)\n self.assertEqual(result.isolevel, 1.)\n self.assertEqual(result.invert_normals, True)\n\n vertices, normals, indices = result\n self.assertEqual(len(vertices), 0)\n self.assertEqual(len(normals), 0)\n self.assertEqual(len(indices), 0)\n\n # Cube array dimensions: shape = (dim 0, dim 1, dim2)\n #\n # dim 0 (Z)\n # ^\n # |\n # 4 +------+ 5\n # /| /|\n # / | / |\n # 6 +------+ 7|\n # | | | |\n # |0 +---|--+ 1 -> dim 2 (X)\n # | / | /\n # |/ |/\n # 2 +------+ 3\n # /\n # dim 1 (Y)\n\n # isosurface perpendicular to dim 0 (Z)\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((1., 1.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 0], level)\n self.assertAllClose(normals, (1., 0., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 1 (Y)\n cube = numpy.array(\n (((0., 0.), (1., 1.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.2\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(vertices[:, 1], level)\n self.assertAllClose(normals, (0., -1., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 2 (X)\n cube = numpy.array(\n (((0., 1.), (0., 1.)),\n ((0., 1.), (0., 1.))), dtype=numpy.float32)\n level = 0.9\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 2], level)\n self.assertAllClose(normals, (0., 0., 1.))\n self.assertEqual(len(indices), 2)\n\n # isosurface normal in dim1, dim 0 (Y, Z) plane\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(normals[:, 2], 0.)\n self.assertEqual(len(indices), 2)", "def extract_region_curvilinear(cube, lat_bounds):\n\n cube = cube.copy() \n \n region_mask = create_region_mask(cube.coord('latitude').points, cube.shape, lat_bounds)\n land_ocean_mask = cube.data.mask\n complete_mask = region_mask + land_ocean_mask\n\n cube.data = numpy.ma.asarray(cube.data)\n cube.data.mask = complete_mask\n\n return cube", "def create_partition(mesh,polygons,enforce_exact=False):", "def get_cut_poly_array(self, planes, angles, disp, fix_pts):\n noPlanes = len(planes)\n plane_storer = [] #4, 2, 3ch in this order\n cut_poly_array = [] #4, 2, 3ch in this order\n\n view_type = ['4ch', '2ch', '3ch']\n\n for i in range(noPlanes):\n if fix_pts[0] == 'var': # for variability test\n origin = self.epi_apex_node\n else: # for foreshortening test\n origin = fix_pts[1+i]\n\n cutPoly_endo_epi, planeActor_endo_epi = self.get_edges_strips(planes[i], origin,\n view_type[i], self.plane_colors[i])\n 
cut_poly_array.append(cutPoly_endo_epi) # 4, 2, 3\n plane_storer.append(planeActor_endo_epi)\n\n\n # DISPLAY PURPOSES #\n\n # include apex_node\n apexA = include_points(list(self.epi_apex_node), 1, 15, (0, 0, 0))\n\n ## create legend box ##\n legend = vtk.vtkLegendBoxActor()\n legend.SetNumberOfEntries(3)\n\n legendBox = vtk.vtkCubeSource()\n legendBox.SetXLength(2)\n legendBox.SetYLength(2)\n legend.SetEntry(0, legendBox.GetOutput(), \"4 ch\", (0, 1, 0)) #green\n legend.SetEntry(1, legendBox.GetOutput(), \"2 ch\", (0, 0, 1)) #blue\n\n legend.UseBackgroundOn()\n legend.LockBorderOn()\n legend.SetBackgroundColor(0.5, 0.5, 0.5)\n\n # create text box to display the angles ..\n textActor = vtk.vtkTextActor()\n textActor.SetInput(\"4ch = \" + str(angles[0])\n + \"\\n\" + \"2ch = \" + str(angles[1]))\n textActor.SetPosition2(10, 40)\n textActor.GetTextProperty().SetFontSize(24)\n textActor.GetTextProperty().SetColor(1.0, 0.0, 0.0)\n\n # display x-y-z actor\n axes = get_axes_actor([80,80,80], [0,0,0])\n\n # lets display the rv_dir\n rv_dir_act = include_points(list(60*self.rv_dir), 1, 15, (1, 0 ,1))\n\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0, 1.0, 1.0)\n ren.AddActor(self.meshActor)\n\n # for plAct in [item for sublist in plane_storer for item in sublist]: # flatten list\n # ren.AddActor(plAct)\n\n ren.AddActor(plane_storer[0][0]) # 4ch endo\n ren.AddActor(plane_storer[0][1]) # 4ch epi\n ren.AddActor(plane_storer[1][0]) # 2ch endo\n ren.AddActor(plane_storer[1][1]) # 2ch epi\n # ren.AddActor(plane_storer[2][0]) # 3ch endo\n # ren.AddActor(plane_storer[2][1]) # 3ch epi\n\n self.meshActor.GetProperty().SetOpacity(1.0)\n ren.AddActor(legend)\n ren.AddActor2D(textActor)\n ren.AddActor(axes)\n ren.AddActor(apexA)\n ren.AddActor(rv_dir_act)\n\n if disp:\n vtk_show(ren)\n\n return cut_poly_array, plane_storer, ren", "def add_subdivision(self):\n temp_sub_vertices = []\n for plane in (self.subdivision_list):\n current_mids = []\n mid_m_01 = Vec3d(0, 0, 0, 0)\n mid_m_12 = Vec3d(0, 0, 0, 0)\n mid_m_20 = Vec3d(0, 0, 0, 0)\n\n mid_m_01.x = (plane[0].x + plane[1].x) / 2\n mid_m_01.y = (plane[0].y + plane[1].y) / 2\n mid_m_01.z = (plane[0].z + plane[1].z) / 2\n mid_m_01.w = plane[0].w\n\n mid_m_12.x = (plane[1].x + plane[2].x) / 2\n mid_m_12.y = (plane[1].y + plane[2].y) / 2\n mid_m_12.z = (plane[1].z + plane[2].z) / 2\n mid_m_12.w = plane[1].w\n\n mid_m_20.x = (plane[2].x + plane[0].x) / 2\n mid_m_20.y = (plane[2].y + plane[0].y) / 2\n mid_m_20.z = (plane[2].z + plane[0].z) / 2\n mid_m_20.w = plane[2].w\n\n current_mids = [mid_m_01, mid_m_12, mid_m_20]\n temp_sub_vertices.append(current_mids)\n\n for index in range(len(current_mids)):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = plane[index].x\n v0.y = plane[index].y\n v0.z = plane[index].z\n\n v1.x = current_mids[index].x\n v1.y = current_mids[index].y\n v1.z = current_mids[index].z\n\n v2.x = current_mids[index - 1].x\n v2.y = current_mids[index - 1].y\n v2.z = current_mids[index - 1].z\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices" ]
[ "0.70632774", "0.69024783", "0.6398612", "0.63738394", "0.6349219", "0.629017", "0.61750984", "0.60984653", "0.6011764", "0.60001856", "0.59929866", "0.5986397", "0.5956049", "0.5908103", "0.59058756", "0.5896524", "0.5848426", "0.57612944", "0.5757789", "0.57517886", "0.56667024", "0.5646014", "0.56056374", "0.5576864", "0.55569315", "0.5510638", "0.5497734", "0.5491792", "0.54905325", "0.5475545" ]
0.7548078
0
Forces to update striatum response history. In normal situation striatum do it automatically.
def update_response(self, response): self.stri.update_response(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)", "def update_state(self, slate_documents, responses):", "async def update_session_history(request, call_next):\n response = await call_next(request)\n history = request.session.setdefault(\n 'history', []).append(request.url.path)\n return response", "def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim", "def SaveHistory(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def slot_history_changed(self, history, _dummy):\r\n pass", "def onRegisterHistory(self):\n pass", "def update_history(self, guess='', result=''):\n item = json.dumps({'guess': guess, 'result': result})\n self.history.append(item)", "def history(self, history):\n self._history = history", "def test_hist_flush():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.flush'\n hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n hf = hist.flush()\n yield assert_is_none, hf\n with mock_xonsh_env({'HISTCONTROL': set()}):\n hist.append({'joco': 'still alive'})\n hf = hist.flush()\n yield assert_is_not_none, hf\n while hf.is_alive():\n pass\n with LazyJSON(FNAME) as lj:\n obs = lj['cmds'][0]['joco']\n yield assert_equal, 'still alive', obs\n os.remove(FNAME)", "def sendHistoricAndPrediction(data):\n global df_hisotorical\n df_hisotorical = get_historical_data(data[\"UserID\"])\n data[\"seqMode\"] = 0\n data[\"versionline\"] = 0\n rounded_pred = predict(data)\n n = sendReport(data, rounded_pred, data[\"satzID\"], \"intv5\")\n\n return rounded_pred", "def history():", "def update_TradeHistory(self, market):\n ##self.marketid is to do!!!\n mid = self.marketid(market)\n history = self.Request.fetch('markettrades',params={'marketid':mid})\n pair = self.Pairs[mid]\n self.TradeHistory[pair] = history\n return 0", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def do(self, market_data):\r\n self.data.history = self.data.history + market_data", "def history(self, history):\n\n self._history = history", "def _problem_update_history(self, _):\n self._update_reward_values()\n self.history.curr_reward.append(self.curr_reward)\n self.history.curr_best_reward.append(self.curr_best_reward)", "def response(self, flow: mitmproxy.http.HTTPFlow):\n if \"https://stock.xueqiu.com/v5/stock/batch/quote.json?_t\" in flow.request.url and \"x=\" in flow.request.url:\n base_data = json.loads(flow.response.text)\n new_data = self.recursion(base_data,2)\n flow.response.text = json.dumps(new_data)", "def process_request(self, request):\n super(HistoryChangesetMiddleware, self).process_request(request)\n if request.META.get('REQUEST_METHOD') in ('GET', 'HEAD'):\n return\n request.changeset = None\n request.close_changeset = False\n # Default is to update cached objects as they are modified\n request.delay_cache = False\n\n changeset_id = request.GET.get('use_changeset')\n if changeset_id:\n changeset = Changeset.objects.get(id=changeset_id)\n if changeset.user != request.user:\n message = (\n 'Changeset %s has a different user.' % changeset_id)\n return self.bad_request(request, message)\n if changeset.closed:\n message = 'Changeset %s is closed.' 
% changeset_id\n return self.bad_request(request, message)\n request.changeset = changeset\n # Wait until changeset is manually closed to schedule cache updates\n request.delay_cache = True", "def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)", "def slot_fullhistory(self, dummy_sender, data):\r\n (history) = data\r\n\r\n if not len(history):\r\n self.debug(\"### history download was empty\")\r\n return\r\n\r\n def get_time_round(date):\r\n \"\"\"round timestamp to current candle timeframe\"\"\"\r\n return int(date / self.timeframe) * self.timeframe\r\n\r\n #remove existing recent candle(s) if any, we will create them fresh\r\n date_begin = get_time_round(int(history[0][\"date\"]))\r\n while len(self.candles) and self.candles[0].tim >= date_begin:\r\n self.candles.pop(0)\r\n\r\n new_candle = OHLCV(0, 0, 0, 0, 0, 0) #this is a dummy, not actually inserted\r\n count_added = 0\r\n for trade in history:\r\n date = int(trade[\"date\"])\r\n price = int(trade[\"price_int\"])\r\n volume = int(trade[\"amount_int\"])\r\n time_round = get_time_round(date)\r\n if time_round > new_candle.tim:\r\n if new_candle.tim > 0:\r\n self._add_candle(new_candle)\r\n count_added += 1\r\n new_candle = OHLCV(\r\n time_round, price, price, price, price, volume)\r\n new_candle.update(price, volume)\r\n\r\n # insert current (incomplete) candle\r\n self._add_candle(new_candle)\r\n count_added += 1\r\n self.debug(\"### got %d updated candle(s)\" % count_added)\r\n self.ready_history = True\r\n self.signal_fullhistory_processed(self, None)\r\n self.signal_changed(self, (self.length()))", "def store_response(self, new_response):\n self.responses.append(new_response)", "def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:\n response[\"choices\"][0][\"text\"] += stream_response[\"choices\"][0][\"text\"]\n response[\"choices\"][0][\"finish_reason\"] = stream_response[\"choices\"][0][\n \"finish_reason\"\n ]\n response[\"choices\"][0][\"logprobs\"] = stream_response[\"choices\"][0][\"logprobs\"]", "def UpdateHistory(self, cmdline, clear=0) :\n\t\t# we try to find a DTML Document which id is \".zshell_history\"\n\t\t# and save commands into it.\n\t\t# BUT we don't test for permissions to modify it because\n\t\t# a Manager may want to keep an history in a place that's usually\n\t\t# not writable by anyone.\n\t\t# I dont know if this is a good thing or not, maybe time will tell...\n\t\t# if there's no history document, then don't do anything.\n\t\thistory = self.getHistory()\n\t\tif history is not None :\n\t\t\t(username, dummy) = self.WhoAmI()\n\t\t\thistoryline = \"%s,%s,%s\" % (DateTime().strftime(\"%Y-%m-%d %H:%M:%S %Z\"), username, cmdline)\n\t\t\toldsrc = history.document_src()\n\t\t\tif clear :\n\t\t\t\tif self.HasPerms(history, 'Change DTML Documents') :\n\t\t\t\t\toldsrc = \"\" # we clear the history, and log the history --clear command\n\t\t\t\telse :\n\t\t\t\t\t# the user doesn't have the correct permissions to clear\n\t\t\t\t\t# the history, so his history --clear was already logged:\n\t\t\t\t\t# there's no need to log it a second time.\n\t\t\t\t\thistoryline = \"\"\n\t\t\telse :\n\t\t\t\tif oldsrc[0] == '<' :\n\t\t\t\t\t# this is probably a non empty .zshell_history DTML Document\n\t\t\t\t\t# I mean a DTML Document which was not emptied before being\n\t\t\t\t\t# used as the history document, so it still contains the\n\t\t\t\t\t# default DTML Document tags: all we have to do is to\n\t\t\t\t\t# empty it by 
ourselves: MAY BE DANGEROUS !\n\t\t\t\t\toldsrc = \"\"\n\t\t\t\telse :\n\t\t\t\t\toldsrc = oldsrc[:-1]\t# we want to eat the last '\\n'\n\t\t\tsrc = oldsrc + historyline + '\\n\\n' # Zope eats the last \\n character too\n\t\t\thistory.manage_edit(src, history.title)", "def history(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n output = \"\"\n if message[1] == \"show\":\n if not self.data_base.has_history(user):\n output = \"you don't have any history\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = self.data_base.show_history(user)\n if len(output) > 4096:\n output = output[-4096::]\n self.data_base.log(user, update.message.text, \"Successfully showed history\")\n\n elif message[1] == \"clear\":\n if not self.data_base.has_history(user):\n output = \"your history is already clean\"\n else:\n self.data_base.clear_history(user)\n output = \"Clean\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = \"Looks like you have a little mistake\\n\" \\\n \"the correct way of using the /history command is:\\n\" \\\n \"/history show\\n\" \\\n \"/history clear\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)", "def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)", "def _update_head_history(self):\n # pylint: disable=broad-except\n try:\n head = [h for h in self._git.heads if h.name == self.head][0]\n self.head_hash = head.commit.hexsha\n self.head_history = [\n {\n \"commit\": str(c.newhexsha),\n \"timestamp\": c.time[0],\n \"message\": c.message,\n \"author\": {\"name\": c.actor.name, \"email\": c.actor.email},\n }\n for c in head.log()[::-1]\n ]\n except Exception as err:\n self.log.warn(\"Git head update error, ignoring: %s\", err, exc_info=True)\n self.head_history = []", "def _on_head_changed(self, change):\n if change.new:\n self._update_head_history()", "def history():\n backup_history()\n yield\n reset_history()", "def _push_history(self):\n self._history.append(self._state)" ]
[ "0.62283283", "0.60817045", "0.59958225", "0.59683615", "0.5823295", "0.5782251", "0.5748286", "0.5663176", "0.55442536", "0.55053854", "0.54971474", "0.5492235", "0.54634976", "0.5441245", "0.5435336", "0.5413573", "0.53847986", "0.537717", "0.5324745", "0.5322132", "0.53182274", "0.53110766", "0.5302265", "0.52775455", "0.52774245", "0.5253061", "0.5249423", "0.52436435", "0.52410346", "0.52400297" ]
0.6470134
1
Forces to update striatum response history. In normal situation striatum do it automatically.
def update_response(self, response): self.stri.update_response(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)", "def update_state(self, slate_documents, responses):", "async def update_session_history(request, call_next):\n response = await call_next(request)\n history = request.session.setdefault(\n 'history', []).append(request.url.path)\n return response", "def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim", "def SaveHistory(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def slot_history_changed(self, history, _dummy):\r\n pass", "def onRegisterHistory(self):\n pass", "def update_history(self, guess='', result=''):\n item = json.dumps({'guess': guess, 'result': result})\n self.history.append(item)", "def history(self, history):\n self._history = history", "def test_hist_flush():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.flush'\n hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n hf = hist.flush()\n yield assert_is_none, hf\n with mock_xonsh_env({'HISTCONTROL': set()}):\n hist.append({'joco': 'still alive'})\n hf = hist.flush()\n yield assert_is_not_none, hf\n while hf.is_alive():\n pass\n with LazyJSON(FNAME) as lj:\n obs = lj['cmds'][0]['joco']\n yield assert_equal, 'still alive', obs\n os.remove(FNAME)", "def sendHistoricAndPrediction(data):\n global df_hisotorical\n df_hisotorical = get_historical_data(data[\"UserID\"])\n data[\"seqMode\"] = 0\n data[\"versionline\"] = 0\n rounded_pred = predict(data)\n n = sendReport(data, rounded_pred, data[\"satzID\"], \"intv5\")\n\n return rounded_pred", "def history():", "def update_TradeHistory(self, market):\n ##self.marketid is to do!!!\n mid = self.marketid(market)\n history = self.Request.fetch('markettrades',params={'marketid':mid})\n pair = self.Pairs[mid]\n self.TradeHistory[pair] = history\n return 0", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def do(self, market_data):\r\n self.data.history = self.data.history + market_data", "def history(self, history):\n\n self._history = history", "def _problem_update_history(self, _):\n self._update_reward_values()\n self.history.curr_reward.append(self.curr_reward)\n self.history.curr_best_reward.append(self.curr_best_reward)", "def response(self, flow: mitmproxy.http.HTTPFlow):\n if \"https://stock.xueqiu.com/v5/stock/batch/quote.json?_t\" in flow.request.url and \"x=\" in flow.request.url:\n base_data = json.loads(flow.response.text)\n new_data = self.recursion(base_data,2)\n flow.response.text = json.dumps(new_data)", "def process_request(self, request):\n super(HistoryChangesetMiddleware, self).process_request(request)\n if request.META.get('REQUEST_METHOD') in ('GET', 'HEAD'):\n return\n request.changeset = None\n request.close_changeset = False\n # Default is to update cached objects as they are modified\n request.delay_cache = False\n\n changeset_id = request.GET.get('use_changeset')\n if changeset_id:\n changeset = Changeset.objects.get(id=changeset_id)\n if changeset.user != request.user:\n message = (\n 'Changeset %s has a different user.' % changeset_id)\n return self.bad_request(request, message)\n if changeset.closed:\n message = 'Changeset %s is closed.' 
% changeset_id\n return self.bad_request(request, message)\n request.changeset = changeset\n # Wait until changeset is manually closed to schedule cache updates\n request.delay_cache = True", "def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)", "def slot_fullhistory(self, dummy_sender, data):\r\n (history) = data\r\n\r\n if not len(history):\r\n self.debug(\"### history download was empty\")\r\n return\r\n\r\n def get_time_round(date):\r\n \"\"\"round timestamp to current candle timeframe\"\"\"\r\n return int(date / self.timeframe) * self.timeframe\r\n\r\n #remove existing recent candle(s) if any, we will create them fresh\r\n date_begin = get_time_round(int(history[0][\"date\"]))\r\n while len(self.candles) and self.candles[0].tim >= date_begin:\r\n self.candles.pop(0)\r\n\r\n new_candle = OHLCV(0, 0, 0, 0, 0, 0) #this is a dummy, not actually inserted\r\n count_added = 0\r\n for trade in history:\r\n date = int(trade[\"date\"])\r\n price = int(trade[\"price_int\"])\r\n volume = int(trade[\"amount_int\"])\r\n time_round = get_time_round(date)\r\n if time_round > new_candle.tim:\r\n if new_candle.tim > 0:\r\n self._add_candle(new_candle)\r\n count_added += 1\r\n new_candle = OHLCV(\r\n time_round, price, price, price, price, volume)\r\n new_candle.update(price, volume)\r\n\r\n # insert current (incomplete) candle\r\n self._add_candle(new_candle)\r\n count_added += 1\r\n self.debug(\"### got %d updated candle(s)\" % count_added)\r\n self.ready_history = True\r\n self.signal_fullhistory_processed(self, None)\r\n self.signal_changed(self, (self.length()))", "def store_response(self, new_response):\n self.responses.append(new_response)", "def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:\n response[\"choices\"][0][\"text\"] += stream_response[\"choices\"][0][\"text\"]\n response[\"choices\"][0][\"finish_reason\"] = stream_response[\"choices\"][0][\n \"finish_reason\"\n ]\n response[\"choices\"][0][\"logprobs\"] = stream_response[\"choices\"][0][\"logprobs\"]", "def history(self, update, context):\n\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n output = \"\"\n if message[1] == \"show\":\n if not self.data_base.has_history(user):\n output = \"you don't have any history\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = self.data_base.show_history(user)\n if len(output) > 4096:\n output = output[-4096::]\n self.data_base.log(user, update.message.text, \"Successfully showed history\")\n\n elif message[1] == \"clear\":\n if not self.data_base.has_history(user):\n output = \"your history is already clean\"\n else:\n self.data_base.clear_history(user)\n output = \"Clean\"\n self.data_base.log(user, update.message.text, output)\n else:\n output = \"Looks like you have a little mistake\\n\" \\\n \"the correct way of using the /history command is:\\n\" \\\n \"/history show\\n\" \\\n \"/history clear\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)", "def UpdateHistory(self, cmdline, clear=0) :\n\t\t# we try to find a DTML Document which id is \".zshell_history\"\n\t\t# and save commands into it.\n\t\t# BUT we don't test for permissions to modify it because\n\t\t# a Manager may want to keep an history in a place that's usually\n\t\t# not writable by anyone.\n\t\t# I dont know if this is a good thing or not, maybe time will tell...\n\t\t# if there's no history document, then don't do 
anything.\n\t\thistory = self.getHistory()\n\t\tif history is not None :\n\t\t\t(username, dummy) = self.WhoAmI()\n\t\t\thistoryline = \"%s,%s,%s\" % (DateTime().strftime(\"%Y-%m-%d %H:%M:%S %Z\"), username, cmdline)\n\t\t\toldsrc = history.document_src()\n\t\t\tif clear :\n\t\t\t\tif self.HasPerms(history, 'Change DTML Documents') :\n\t\t\t\t\toldsrc = \"\" # we clear the history, and log the history --clear command\n\t\t\t\telse :\n\t\t\t\t\t# the user doesn't have the correct permissions to clear\n\t\t\t\t\t# the history, so his history --clear was already logged:\n\t\t\t\t\t# there's no need to log it a second time.\n\t\t\t\t\thistoryline = \"\"\n\t\t\telse :\n\t\t\t\tif oldsrc[0] == '<' :\n\t\t\t\t\t# this is probably a non empty .zshell_history DTML Document\n\t\t\t\t\t# I mean a DTML Document which was not emptied before being\n\t\t\t\t\t# used as the history document, so it still contains the\n\t\t\t\t\t# default DTML Document tags: all we have to do is to\n\t\t\t\t\t# empty it by ourselves: MAY BE DANGEROUS !\n\t\t\t\t\toldsrc = \"\"\n\t\t\t\telse :\n\t\t\t\t\toldsrc = oldsrc[:-1]\t# we want to eat the last '\\n'\n\t\t\tsrc = oldsrc + historyline + '\\n\\n' # Zope eats the last \\n character too\n\t\t\thistory.manage_edit(src, history.title)", "def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)", "def _update_head_history(self):\n # pylint: disable=broad-except\n try:\n head = [h for h in self._git.heads if h.name == self.head][0]\n self.head_hash = head.commit.hexsha\n self.head_history = [\n {\n \"commit\": str(c.newhexsha),\n \"timestamp\": c.time[0],\n \"message\": c.message,\n \"author\": {\"name\": c.actor.name, \"email\": c.actor.email},\n }\n for c in head.log()[::-1]\n ]\n except Exception as err:\n self.log.warn(\"Git head update error, ignoring: %s\", err, exc_info=True)\n self.head_history = []", "def _on_head_changed(self, change):\n if change.new:\n self._update_head_history()", "def history():\n backup_history()\n yield\n reset_history()", "def _push_history(self):\n self._history.append(self._state)" ]
[ "0.6231142", "0.60827225", "0.5993901", "0.59652436", "0.58213335", "0.57789826", "0.57437676", "0.5659112", "0.55398905", "0.55027115", "0.54957896", "0.5486744", "0.54617155", "0.54379827", "0.54329455", "0.5409475", "0.53816503", "0.5377957", "0.5323756", "0.5322567", "0.5316705", "0.5311833", "0.5304548", "0.52725494", "0.5272285", "0.52542144", "0.5246459", "0.5240093", "0.5236766", "0.5236035" ]
0.6472602
0
Function to plot, in log scale, the differential storage modulus, k as a function of stress, strain, or both. INPUT
def plot_k(x, k, linewidth = 1.5, marker = 'o', color = 'k', marker_facecolor = 'k'): # Plot the first variable x1 = x[0] plt.figure(figsize = (9,5)) plt.plot(x1, k, c = color, lw = linewidth, marker = marker, mec = color, mfc = marker_facecolor) plt.loglog() plt.ylabel('$K\'$ (Pa)') # If there is more than one dependent variable, # Plot also the second variable in a different figure try: x2 = x[1] plt.xlabel('$\sigma$ (Pa)') plt.pause(0.1) plt.figure(figsize =(9, 5)) plt.plot(x2, k, c = color, lw = linewidth, marker = marker, mec = color, mfc = marker_facecolor) plt.loglog() plt.ylabel('$K\'$ (Pa)') plt.xlabel('$\gamma$ (%)') except IndexError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exo1():\n x = [.5; .5]\n niter = 20\n E = []\n D = []\n X = []\n for i in 1: niter:\n X(: , i) = x\n E(end + 1) = f(x)\n D(end + 1) = norm(x)\n x = x - tau*Gradf(x)\n h = plot(log10(E))\n set(h, 'LineWidth', 2)\n axis tight\n title('log_{10}(x^{(k)})')", "def exponent_plot(self, k_mins=None, file=\"output.pdf\"):\n if k_mins == None:\n step = int(self.max_deg/20) if self.max_deg>20 else 1\n k_mins = range(1,self.max_deg,step)\n\n K=[]\n G=[]\n S=[]\n for k in k_mins:\n k_min = k - 0.5\n g,s = self.exponent(k_min)\n result = \"k_min=%.1f: %f+-%f\" % (k,g,s)\n print(result)\n K.append(k)\n G.append(g)\n S.append(s)\n print(\"%f <= gamma%s <= %f\" % (min(G), self.index, max(G)))\n p = pylab.errorbar(K, G, yerr=S)\n pylab.xlabel(\"k_min\")\n pylab.ylabel(\"gamma%s\" % self.index)\n pylab.title(\"The dependence from k_min of the exponent (%s)\" % \\\n self.degree_type)\n #pylab.gca().set_yscale(\"log\")\n #pylab.gca().set_xscale(\"log\")\n pylab.savefig(file)\n pylab.show()\n return p", "def plot(dsname, wdir = './', width = 1000.0, dt = 5.0*yt.units.Myr, fields = all_fields,\n thickness = 20.0, outdir = './enrichment_plots_kpc'):\n\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n gal = Galaxy(dsname, wdir = wdir)\n data = gal.df\n\n @derived_field(name=\"logNO\", units=\"\")\n def _logNO(field, data):\n return np.log10(data['N_Abundance'] / data['O_Abundance'])\n gal.ds.add_field((\"gas\", \"logNO\"), function=_logNO, units=\"\")\n\n make_filtered_field(gal.ds, 'logNO', ['O_Fraction','N_Fraction'])\n make_filtered_field(gal.ds, 'O_over_H', ['O_Fraction'])\n make_filtered_field(gal.ds, 'N_over_O', ['O_Fraction','N_Fraction'])\n# def _logNO_filtered(field,data):\n# x = data[('gas','logNO')]\n#\n# f1 = data[('gas','O_Fraction')]\n# f2 = data[('gas','N_Fraction')]\n#\n# x[ (f1 < tol) + (f2 < tol)] = np.nan\n#\n# return x\n# gal.ds.add_field(('gas','logNO_filtered'), function = _logNO_filtered, units = \"\")\n\n M = data['birth_mass']\n t_o = data['creation_time'].convert_to_units('Myr')\n MS_lifetime = data[('io','particle_model_lifetime')].to('Myr')\n MS_death = t_o + MS_lifetime\n px = (data['particle_position_x'] - gal.ds.domain_center[0]).to('pc')\n py = (data['particle_position_y'] - gal.ds.domain_center[1]).to('pc')\n pz = (data['particle_position_z'] - gal.ds.domain_center[2]).to('pc')\n\n recent_death = (MS_death > gal.ds.current_time - dt) * (MS_death <= gal.ds.current_time + 0.001*yt.units.Myr)\n alive = MS_death > gal.ds.current_time + 0.001*yt.units.Myr\n\n AGB = M < 8.0\n massive_star = (M > 8.0) * (M < 25.0)\n\n boxdim = np.array([width*1.25,width*1.25,thickness])*yt.units.pc\n region = gal.ds.box(gal.ds.domain_center - boxdim*0.5, gal.ds.domain_center + boxdim*0.5)\n\n proj = yt.ProjectionPlot(gal.ds, 'z', fields,\n weight_field = 'number_density', data_source = region, width = (width,'pc'))\n\n if 'number_density' in fields:\n proj.set_unit('number_density','cm**(-3)')\n proj.set_cmap('number_density','viridis')\n proj.set_zlim('number_density',1.0E-4,200.0)\n\n if 'O_over_H_filtered' in fields:\n proj.set_cmap('O_over_H_filtered','cubehelix')\n proj.set_log('O_over_H_filtered', False)\n proj.set_zlim('O_over_H_filtered', -5, 1)\n proj.set_colorbar_label('O_over_H_filtered', r'[O/H]')\n\n if 'N_over_O_filtered' in fields:\n proj.set_cmap('N_over_O_filtered','PRGn')\n proj.set_log('N_over_O_filtered',False)\n proj.set_zlim('N_over_O_filtered',-2,2)\n proj.set_colorbar_label('N_over_O_filtered', r'[N/O]')\n\n if 'logNO' in fields:\n proj.set_cmap('logNO','PRGn')\n 
proj.set_log('logNO',False)\n proj.set_zlim('logNO',-2,0.5)\n proj.set_colorbar_label('logNO', r'log( N / O )')\n\n if 'logNO_filtered' in fields:\n proj.set_cmap('logNO_filtered','PRGn')\n proj.set_log('logNO_filtered',False)\n proj.set_zlim('logNO_filtered',-2,0.5)\n proj.set_colorbar_label('logNO_filtered', r'log( N / O )')\n\n if 'Temperature' in fields:\n proj.set_cmap('Temperature', 'RdYlBu_r')\n proj.set_log('Temperature',True)\n proj.set_zlim('Temperature',10.0, 1.0E7)\n proj.set_colorbar_label('Temperature', r'Temperature (K)')\n\n if 'G_o' in fields:\n proj.set_cmap('G_o', 'cubehelix')\n proj.set_log('G_o', True)\n proj.set_zlim('G_o',0.05, 100.0)\n proj.set_colorbar_label('G_o', r'ISRF (G$_{\\rm o}$)')\n\n if 'Q0_flux':\n proj.set_cmap('Q0_flux', 'magma')\n proj.set_log('Q0_flux',True)\n proj.set_zlim('Q0_flux',1.0E-6, 1.0E-1)\n proj.set_colorbar_label('Q0_flux', r'HI Ionizing Radiation (s$^{-1}$)')\n\n Mstar = np.sum(gal.df['particle_mass'][ gal.df['particle_type'] == 11]).to('Msun')\n time = gal.ds.current_time.to('Myr')\n# proj.annotate_title(r\"Time = %1.1f Myr M$_{*}$ = %2.2E M$_{\\odot}$\"%(time.value,Mstar.value))\n proj.set_font( {'size' : 32} )\n proj.save(outdir + '/') # necessary\n\n\n dt = 5.0 * yt.units.Myr\n # buffer around image. otherwise points plotted near edge of image my run a little outside\n # viewing area, causing weird shifts in plotting. Not sure how to control this otherwise\n buffer = 15.0 # in pc\n in_image = (np.abs(pz) <= boxdim[2]*0.5) *\\\n (np.abs(px) <= (width*0.5 - buffer)) *\\\n (np.abs(py) <= (width*0.5 - buffer))\n\n pp = {}\n pp['massive_star_winds'] = in_image * alive * massive_star\n pp['AGB_winds'] = in_image * recent_death * AGB\n pp['SN'] = in_image * recent_death * massive_star\n #pp['other_stars'] = in_image * alive * (np.logical_not(pp['massive_star_winds']))\n\n for k in list(proj.plots.keys()):\n image = proj.plots[k]\n\n #\n # Now select and annotate the points we want\n #\n for s in list(pp.keys()):\n if np.size(px[pp[s]].value) > 0:\n print(np.size(px[pp[s]]), 'Particles in ', s, px[pp[s]], py[pp[s]])\n image.axes.scatter(px[pp[s]].value,py[pp[s]].value, s = ps[s], marker = markers[s], color = colors[s])\n else:\n print('No particles in ', s)\n\n# proj.refresh()\n# proj.hide_axes()\n proj.save(outdir + '/') # necessary\n\n if 'N_over_O' in fields:\n vmin,vmax = -2,2\n x = proj.plots['N_over_O']\n x.image.set_norm( MidpointNormalize(midpoint= 0.5*(vmin+vmax), vmin=vmin,vmax=vmax))\n x.cb.set_norm(MidpointNormalize(midpoint=0.5*(vmin+vmax),vmin=vmin,vmax=vmax))\n x.cb.update_normal(x.image)\n x.save(outdir + '/' + str(gal.ds) + '_Projection_z_N_over_O_number_density.png')\n\n if 'logNO' in fields:\n vmin, vmax = -2, 0.25\n x = proj.plots['logNO']\n x.image.set_norm( MidpointNormalize(midpoint= 0.0, vmin=vmin,vmax=vmax))\n x.cb.set_norm(MidpointNormalize(midpoint=0.0, vmin=vmin,vmax=vmax))\n x.cb.update_normal(x.image)\n x.save(outdir + '/' + str(gal.ds) + '_Projection_z_logNO_number_density.png')\n\n del(proj)\n del(gal)\n\n return", "def Tk(self, x, k):\n self._check(x, k)\n x = float(x)\n log_x = log(x)\n val = float(0)\n rho = self.rho[k]\n for n in range(1, self.N + 1):\n rho_k_over_n = rho[n]\n mu_n = self.mu[n]\n if mu_n != 0:\n z = Ei(rho_k_over_n * log_x)\n val += (mu_n / float(n)) * (2 * z).real()\n return -val", "def plot_phase_diagram(self):\n t_max = np.log(max(self.temperatures))\n d_min = np.log(min(self.distortions))\n y_axis = [np.log(i) - d_min for i in self.distortions]\n x_axis = [t_max - np.log(i) for i in 
self.temperatures]\n\n plt.figure(figsize=(12, 9))\n plt.plot(x_axis, y_axis)\n\n region = {}\n for i, c in list(enumerate(self.n_eff_clusters)):\n if c not in region:\n region[c] = {}\n region[c]['min'] = x_axis[i]\n region[c]['max'] = x_axis[i]\n for c in region:\n if c == 0:\n continue\n plt.text((region[c]['min'] + region[c]['max']) / 2, 0.2,\n 'K={}'.format(c), rotation=90)\n plt.axvspan(region[c]['min'], region[c]['max'], color='C' + str(c),\n alpha=0.2)\n plt.title('Phases diagram (log)')\n plt.xlabel('Temperature')\n plt.ylabel('Distortion')\n plt.show()", "def plot_kinetics(k_data, i_data, tlim=None, xlim=None, lb=10, mpp=0.33, seg_length=100, fps=10, plot=True):\n \n t = [] \n power = []\n \n # apply tlim\n if tlim == None:\n pass\n elif isinstance(tlim, int):\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ tc < tlim]\n i_data = i_data.loc[i_data.t / fps < tlim]\n elif isinstance(tlim, list) and len(tlim) == 2:\n assert(tlim[1]>tlim[0])\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ (tc < tlim[1]) & (tc >= tlim[0])]\n i_data = i_data.loc[(i_data.t / fps < tlim[1]) & (i_data.t / fps >= tlim[0])]\n else:\n raise ValueError('tlim should be None, int or list of 2 int') \n \n # compute exponents at different time\n # t, power will be plotted on ax1\n for idx in k_data.segment.drop_duplicates():\n subdata = k_data.loc[k_data.segment==idx]\n xx, yy = postprocess_gnf(subdata, lb, xlim=xlim, sparse=3)\n x = np.log(xx)\n y = np.log(yy)\n p = np.polyfit(x, y, deg=1)\n t.append((idx-1)*seg_length/fps)\n power.append(p[0])\n\n # rescale light intensity to (0, 1)\n # t1, i will be plotted on ax2\n t1 = i_data.t / fps\n i = i_data.intensity - i_data.intensity.min()\n i = i / i.max()\n \n data = {'t0': t, 'alpha': power, 't1': t1, 'i': i}\n \n if plot == True:\n # set up fig and ax\n fig = plt.figure()\n ax1 = fig.add_axes([0,0,1,1])\n ax2 = ax1.twinx()\n\n # plot t, power\n color = wowcolor(0)\n ax1.set_xlabel('$t$ [s]')\n ax1.set_ylabel('$\\\\alpha$', color=color)\n ax1.plot(t, power, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n # plot t1, intensity\n color = wowcolor(4)\n ax2.set_ylabel('$I$', color=color)\n ax2.plot(t1, i, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n return data, fig, ax1\n else:\n return data", "def plotLogs(d, rho, v, usingT=True):\n d = np.sort(d)\n\n dpth, rholog, vlog, zlog, rseries = getLogs(d, rho, v, usingT)\n nd = len(dpth)\n\n\n xlimrho = (1.95,5.05)\n xlimv = (0.25,4.05)\n xlimz = (xlimrho[0]*xlimv[0], xlimrho[1]*xlimv[1])\n\n # Plot Density\n plt.figure(1)\n\n plt.subplot(141)\n plotLogFormat(rholog*10**-3,dpth,xlimrho,'blue')\n plt.title('$\\\\rho$')\n plt.xlabel('Density \\n $\\\\times 10^3$ (kg /m$^3$)',fontsize=9)\n plt.ylabel('Depth (m)',fontsize=9)\n\n plt.subplot(142)\n plotLogFormat(vlog*10**-3,dpth,xlimv,'red')\n plt.title('$v$')\n plt.xlabel('Velocity \\n $\\\\times 10^3$ (m/s)',fontsize=9)\n plt.setp(plt.yticks()[1],visible=False)\n\n plt.subplot(143)\n plotLogFormat(zlog*10.**-6.,dpth,xlimz,'green')\n plt.gca().set_title('$Z = \\\\rho v$')\n plt.gca().set_xlabel('Impedance \\n $\\\\times 10^{6}$ (kg m$^{-2}$ s$^{-1}$)',fontsize=9)\n plt.setp(plt.yticks()[1],visible=False)\n\n plt.subplot(144)\n plt.hlines(d[1:],np.zeros(len(d)-1),rseries,linewidth=2)\n plt.plot(np.zeros(nd),dpth,linewidth=2,color='black')\n plt.title('Reflectivity');\n plt.xlim((-1.,1.))\n plt.gca().set_xlabel('Reflectivity')\n plt.grid()\n plt.gca().invert_yaxis()\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n 
plt.setp(plt.yticks()[1],visible=False)\n\n plt.tight_layout()\n plt.show()", "def dplot(self):\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n from sys import stderr\n print(\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\", file=stderr)\n raise\n\n x_min = np.min(self.knot_vector)\n x_max = np.max(self.knot_vector)\n\n x = np.linspace(x_min, x_max, num=1000)\n\n ns = np.array([self.d(i) for i in x]).T\n\n for n in ns:\n plt.plot(x, n)\n\n return plt.show()", "def _plotting_formula(k, l, m):\n return (l + 0.2) * m / ((k - 0.4) * l)", "def plot_dispersion(kpts, enk):\n\n # Lattice constant and reciprocal lattice vectors\n # b1 = 2 pi/a (kx - ky + kz)\n # b2 = 2 pi/a (kx + ky - kz)\n # b3 = 2 pi/a (-kx + ky + kz)\n a = 5.556 # [A]\n b1 = (2 * np.pi / a) * np.array([1, -1, 1])\n b2 = (2 * np.pi / a) * np.array([1, 1, -1])\n b3 = (2 * np.pi / a) * np.array([-1, 1, 1])\n\n # L point in BZ is given by 0.5*b1 + 0.5*b2 + 0.5*b3\n # X point in BZ is given by 0.5*b2 + 0.5*b3\n lpoint = 0.5 * (b1 + b2 + b3)\n xpoint = 0.5 * (b2 + b3)\n\n # We can find kpoints along a path just by considering a dot product with lpoint and xpoint vectors.\n # Any kpoints with angle smaller than some tolerance are considered on the path and we can plot their frequencies\n deg2rad = 2 * np.pi / 360\n ang_tol = 1 * deg2rad # 1 degree in radians\n\n print(list(kpts))\n\n enkonly = np.array(enk['energy [Ryd]'])[:, np.newaxis]\n enkinds = np.array(enk['q_inds'])\n kptsonly = np.array(kpts[['kx [1/A]', 'ky [1/A]', 'kz [1/A]']]) / (2 * np.pi / a)\n kptsinds = np.array(kpts['q_inds'])\n kptsmag = np.linalg.norm(kptsonly, axis=1)[:, np.newaxis]\n\n dot_l = np.zeros(len(kpts))\n dot_x = np.zeros(len(kpts))\n\n # Separate assignment for gamma point to avoid divide by zero error\n nongamma = kptsmag != 0\n dot_l[np.squeeze(nongamma)] = np.divide(np.dot(kptsonly, lpoint[:, np.newaxis])[nongamma],\n kptsmag[nongamma]) / np.linalg.norm(lpoint)\n dot_x[np.squeeze(nongamma)] = np.divide(np.dot(kptsonly, xpoint[:, np.newaxis])[nongamma],\n kptsmag[nongamma]) / np.linalg.norm(xpoint)\n dot_l[np.squeeze(kptsmag == 0)] = 0\n dot_x[np.squeeze(kptsmag == 0)] = 0\n\n lpath = np.logical_or(np.arccos(dot_l) < ang_tol, np.squeeze(kptsmag == 0))\n xpath = np.logical_or(np.arccos(dot_x) < ang_tol, np.squeeze(kptsmag == 0))\n\n linds = kptsinds[lpath]\n xinds = kptsinds[xpath]\n lkmag = kptsmag[lpath]\n xkmag = kptsmag[xpath]\n\n plt.figure()\n\n for i, ki in enumerate(linds):\n energies = enkonly[enkinds == ki, 0]\n thiskmag = lkmag[i]\n if len(energies) > 1:\n veck = np.ones((len(energies), 1)) * thiskmag\n plt.plot(veck, energies, '.', color='C0')\n else:\n plt.plot(thiskmag, energies, '.', color='C0')\n\n for i, ki in enumerate(xinds):\n energies = enkonly[enkinds == ki, 0]\n thiskmag = lkmag[i]\n if len(energies) > 1:\n veck = np.ones((len(energies), 1)) * thiskmag\n plt.plot(-1 * veck, energies, '.', color='C1')\n else:\n plt.plot(-1 * thiskmag, energies, '.', color='C1')\n\n plt.xlabel('k magnitude')\n plt.ylabel('Energy in Ry')", "def momentum_kde2_paperplot(fields):\n plt.figure(figsize=(2.65, 2.5))\n ax = plt.axes([0.18, 0.17, 0.8, 0.8])\n colorList = [med_color, high_color]\n lw = 1.5\n i = 0\n meankx_2 = []\n meankx_3 = []\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color, label='Equilibrium')\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, 
color=eq_color)\n ax.axhline(0, color='black', linestyle='--', linewidth=0.5)\n # ax.axvline(0, color='gray', linewidth=0.8, alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n meankx_2.append(utilities.mean_kx(chi_2_i, electron_df))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n meankx_3.append(utilities.mean_kx(chi_3_i, electron_df))\n\n ax.plot(k_ax, kdist_2, '--', linewidth=lw, color=colorList[i], label='Cold '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n ax.plot(k_ax, kdist_3, '-', linewidth=lw,color=colorList[i], label='Warm '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n i = i + 1\n # ax.plot(k_ax, kdist_f0_3, '--', linewidth=lw, color='black', label=r'$f_0$')\n # ax.plot(meankx_2,np.mean(abs(kdist_2))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n # ax.plot(meankx_3,np.mean(abs(kdist_3))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.locator_params(axis='y', nbins=6)\n ax.locator_params(axis='x', nbins=6)\n # ax.tick_params(direction='in')\n ax.set_xlim(-0.085, 0.081)\n\n plt.xlabel(r'$\\rm k_x \\, \\, (\\AA^{-1})$')\n plt.ylabel(r'Deviational occupation $\\rm \\Delta f_{\\mathbf{k}}$')\n # plt.grid(lw=0.8, linestyle='dotted')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n # plt.ylim([-1,1])\n plt.legend(frameon=False,prop={'size':different_small_size})\n plt.savefig(pp.figureLoc+'momentum_KDE2.png', dpi=600)", "def plotLogsInteract(d2,d3,rho1,rho2,rho3,v1,v2,v3,usingT=False):\n d = np.array((0.,d2,d3), dtype=float)\n rho = np.array((rho1,rho2,rho3), dtype=float)\n v = np.array((v1,v2,v3), dtype=float)\n plotLogs(d, rho, v, usingT)", "def degree_average(n0,l,nt,m,display=False):\t\n\n# initialise variables\n\n P = np.zeros((n0+nt,m))\n Pave = np.zeros((n0+nt,m))\n PD = np.zeros((n0+nt,m))\n \n# call stats to assing qnetm\n \n qnetm, _, _ = ns.stats(n0,l,nt,m)\n\n# create Pave \n# extract the unique list of qnet values from n=1->m realizations \n# take the average degree over the n realizations \n\n for n in range(m):\n P,Q = np.unique(qnetm[:,n],return_counts=True)\n for k in range(len(P)):\n PD[P[k],n] = Q[k]\n\n# normalize Pave and remove zeros\n \n Pave = np.mean(PD, axis=1)/(n0+nt)\n Pave = Pave[Pave>0]\n \n# declare our domain of 1->Pave realizations\n\n x = np.arange(1,np.size(Pave)+1)\n\n# seek to solve for k and a satisfying Pave = a*x**(-b)\n# reduce problem to log(Pave) = c - k*log(x) (c = log(a), and flip sgn(b) for now)\n\n b,c = np.polyfit(np.log(x), np.log(Pave), 1)\n\n# create log-log plot for when display is true\n\n if display:\n plt.figure()\n plt.plot(np.log(x), np.log(Pave), 'b')\n plt.plot(np.log(x), c + b*np.log(x), 'r')\n plt.xlabel('log(x) x=1->size(Pave)')\n plt.ylabel('log(Pave)')\n plt.title('log-log plot of x against Pave with power law fit')\n plt.legend(loc='best')\n plt.show()\n\n return -b", "def 
log_KDE(xp_path, kernel, bandwidth):\n\n log_file = \"{}/log.txt\".format(xp_path)\n log = open(log_file, \"a\")\n\n log.write(\"KDE configuration\\n\")\n log.write(\"Kernel: {}\\n\".format(kernel))\n log.write(\"Bandwidth: {}\\n\".format(bandwidth))\n log.write(\"GridSearchCV for hyperparameter selection? {}\\n\".format(Cfg.kde_GridSearchCV))\n\n log.write(\"\\n\\n\")\n log.close()", "def dk_plotting():\n heatmap_mode1_error_x(make_heatmap=False, make_panel=True)\n\n #heatmap_mode1_error_x()\n figure_2_combined_cross_sections()\n\n #heatmap_combined_error_c()\n #heatmap_combined_error_koff()\n #heatmap_kpr_error_c()\n #heatmap_kpr_error_koff()\n\n #heatmap_kpr2_error_c()\n #heatmap_kpr2_error_koff()\n\n ctildePosterior = [truncate(f, 3) for f in list(np.arange(0.0 * KON / KP, 5.0 * KON / KP + 0.005, 0.005))[1:]]\n kofftildePosterior = [truncate(f, 2) for f in list(np.arange(0.0 / KP, 50.0 / KP + 0.05, 0.05))[1:]]\n\n #heatmap_figure_4()\n\n return 0", "def plot_powerlaw(self, **kwargs):\n\n if self.gamma is None:\n self.exponent()\n p = powerlaw.plot(exponent=-self.gamma,\n xmax=self.max_deg, xmin=self.k_min,\n **kwargs\n )\n pylab.show()\n return p", "def plot_log(self, **kwargs):\n import matplotlib.pyplot as plt\n self._plot_pair(ylabel='Reflectivity', **kwargs)\n plt.yscale('log')", "def plot_r(f=500, d=100e-3, dr=0.01, picture_file=None, picture_formats=['png', 'pdf', 'svg']):#x_axis='r', \n import matplotlib.pyplot\n i = 0\n rs = []\n sigmas = []\n ys = []\n print \"r_soll ->\\tsigma ->\\tr\"\n datas = []\n for r in numpy.arange(0, 1+dr, dr) :\n for t in [0] :\n print \"%f\\t\" %(r),\n sigma = getSigma(r)\n print \"%f\\t\" % (sigma),\n rs.append(r)\n sigmas.append(sigma)\n v = getSynapticActivity(f=f, r=r, fireing_rate=1, duration=d, delay=t)\n #print v\n #matplotlib.pyplot.scatter(v, numpy.zeros( len(v) ) + i )\n r = vector_strength(f, v)\n print \"%f\" % (r)\n ys.append(r)\n i = i+1\n datas.append([sigma,r])\n numpy.savetxt(\"../../../Data/%.1f_%f@%i.dat\" % (getSigma(dr),dr,int(f*d)), datas) \n\n matplotlib.pyplot.figure()\n matplotlib.pyplot.xlabel('sigma')\n matplotlib.pyplot.ylabel('measured vector strength')\n matplotlib.pyplot.xlim(0, getSigma(dr))\n matplotlib.pyplot.ylim(0, 1)\n matplotlib.pyplot.grid()\n matplotlib.pyplot.scatter(sigmas,ys, marker='x', color='black')#, basex=10, basey=10, ls=\"-\"\n if(picture_file != None):\n for picture_format in picture_formats:\n matplotlib.pyplot.savefig(picture_file+'sigma_'+str(getSigma(dr))+'_'+str(int(f*d))+'.'+picture_format,format=picture_format)\n else:\n matplotlib.pyplot.show()\n\n matplotlib.pyplot.figure()\n matplotlib.pyplot.xlabel('aimed vector strength')\n matplotlib.pyplot.ylabel('measured vector strength')\n #matplotlib.pyplot.legend([\"based on %i examples / dot\" % (f*d) ], loc='best');\n matplotlib.pyplot.xlim(0, 1)\n matplotlib.pyplot.ylim(0, 1)\n matplotlib.pyplot.grid()\n\n matplotlib.pyplot.scatter(rs,ys, marker='x', color='black')\n if(picture_file != None):\n for picture_format in picture_formats:\n matplotlib.pyplot.savefig(picture_file+'_'+str(dr)+'_'+str(int(f*d))+'.'+picture_format,format=picture_format)\n else:\n matplotlib.pyplot.show()\n\n matplotlib.pyplot.close('all')\n datas = numpy.ndarray((len(datas),2), buffer=numpy.array(datas),dtype=float)\n return datas", "def plot_fig36(mu=41):\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n # index: variance_type, type, mu, variance, unwrap\n # columns: slope, intercept, rvalue, pvalue, stderr, b\n all_kuhns = 
pd.read_csv('./csvs/kuhns_so_far.csv', index_col=np.arange(5))\n kg = all_kuhns.loc['box', 'geometrical', mu].reset_index()\n kg = kg.sort_values('variance')\n ax.plot(kg['variance'].values, kg['b'].values, '--^', markersize=3, label='Zero-temperature',\n color=red_geom)\n kf = all_kuhns.loc['box', 'fluctuations', mu].reset_index()\n kf = kf.sort_values('variance')\n ax.plot(kf['variance'].values, kf['b'].values, '-o', markersize=3, label='Fluctuating',\n color=teal_flucts)\n rdf = pd.read_csv('./csvs/r2/r2-fluctuations-exponential-link-mu_41-0unwraps.csv')\n b = rdf['kuhn'].mean()\n xlim = plt.xlim()\n plt.plot([-10, 50], [b, b], 'k-.', label='Maximum Entropy')\n plt.xlim(xlim)\n ax.set_ylim([0, 100])\n plt.xlabel('Linker length variability $\\pm\\sigma$ (bp)')\n plt.ylabel('Kuhn length (nm)')\n plt.legend()\n fig.text(1.3, 0, r'$\\pm 0 bp$', size=9)\n fig.text(1.6, 0, r'$\\pm 2 bp$', size=9)\n fig.text(1.9, 0, r'$\\pm 6 bp$', size=9)\n # plt.subplots_adjust(left=0.07, bottom=0.15, top=0.92, right=0.97)\n plt.tight_layout()\n plt.savefig('./plots/PRL/fig-3-kuhn_length_vs_window_size_41_sigma0to40.pdf',\n bbox_inches='tight')", "def plot():\n ts, ys, lin_model, K, us, dt_control, biass, end_time = simulate()\n\n matplotlib.rcParams.update({'font.size': 18})\n fig, axes = plt.subplots(\n 1, 3,\n figsize=(6.25 * 3, 5),\n gridspec_kw={'wspace': 0.3}\n )\n\n ax = axes[0]\n ax.plot(ts, us[:, lin_model.inputs[1]], 'k')\n ax.plot(ts, us[:, lin_model.inputs[0]], 'k--')\n\n ax.set_title(r'Inputs')\n ax.set_ylabel(r'$\\frac{L}{min}$')\n ax.set_xlabel(r't ($min$)')\n ax.legend([r'$F_{m, in}$', r'$F_{G, in}$'])\n ax.set_xlim([0, ts[-1]])\n\n ax = axes[1]\n ax.plot(ts, ys[:, 2], 'k')\n ax.plot(ts, ys[:, 0], 'grey')\n ax.plot(ts, ys[:, 3], 'k--')\n\n ax.set_title(r'Outputs')\n ax.set_ylabel(r'$\\frac{mg}{L}$')\n ax.set_xlabel(r't ($min$)')\n ax.set_xlim([0, ts[-1]])\n ax.legend([r'$C_{FA}$', r'$C_{G}$', r'$C_{E}$'])\n\n ax.axhline(lin_model.yd2n(K.ysp)[1], color='red')\n ax.axhline(lin_model.yd2n(K.ysp)[0], color='red', linestyle='--')\n\n ax = axes[2]\n ax.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 1],\n 'k'\n )\n ax.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 0],\n 'k--'\n )\n ax.legend([r'$C_{FA}$', r'$C_G$'])\n ax.set_title('bias')\n ax.set_ylabel(r'$\\frac{mg}{L}$')\n ax.set_xlabel(r't ($min$)')\n ax.set_xlim([0, ts[-1]])\n\n # plt.suptitle('Closedloop bioreactor without noise')\n # plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('no_noise.pdf', bbox_inches='tight')\n plt.show()", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def plotCFM(u_kln, 
N_k, num_bins=100):\n\n print \"Plotting the CFM figure...\"\n def leaveTicksOnlyOnThe(xdir, ydir, axis):\n dirs = ['left', 'right', 'top', 'bottom']\n axis.xaxis.set_ticks_position(xdir)\n axis.yaxis.set_ticks_position(ydir)\n return\n\n def plotdg_vs_dU(yy, df_allk, ddf_allk):\n sq = (len(yy))**0.5\n h = int(sq)\n w = h + 1 + 1*(sq-h>0.5)\n scale = round(w/3., 1)+0.4 if len(yy)>13 else 1\n sf = numpy.ceil(scale*3) if scale>1 else 0\n fig = pl.figure(figsize = (8*scale,6*scale))\n matplotlib.rc('axes', facecolor = '#E3E4FA')\n matplotlib.rc('axes', edgecolor = 'white')\n if P.bSkipLambdaIndex:\n ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]\n ks = numpy.delete(numpy.arange(K+len(ks)), ks)\n else:\n ks = range(K)\n for i, (xx_i, yy_i) in enumerate(yy):\n ax = pl.subplot(h, w, i+1)\n ax.plot(xx_i, yy_i, color='r', ls='-', lw=3, marker='o', mec='r')\n leaveTicksOnlyOnThe('bottom', 'left', ax)\n ax.locator_params(axis='x', nbins=5)\n ax.locator_params(axis='y', nbins=6)\n ax.fill_between(xx_i, df_allk[i]['BAR'] - ddf_allk[i]['BAR'], df_allk[i]['BAR'] + ddf_allk[i]['BAR'], color='#D2B9D3', zorder=-1)\n\n ax.annotate(r'$\\mathrm{%d-%d}$' % (ks[i], ks[i+1]), xy=(0.5, 0.9), xycoords=('axes fraction', 'axes fraction'), xytext=(0, -2), size=14, textcoords='offset points', va='top', ha='center', color='#151B54', bbox = dict(fc='w', ec='none', boxstyle='round', alpha=0.5))\n pl.xlim(xx_i.min(), xx_i.max())\n pl.annotate(r'$\\mathrm{\\Delta U_{i,i+1}\\/(reduced\\/units)}$', xy=(0.5, 0.03), xytext=(0.5, 0), xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')\n pl.annotate(r'$\\mathrm{\\Delta g_{i+1,i}\\/(reduced\\/units)}$', xy=(0.06, 0.5), xytext=(0, 0.5), rotation=90, xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')\n pl.savefig(os.path.join(P.output_directory, 'cfm.pdf'))\n pl.close(fig)\n return\n\n def findOptimalMinMax(ar):\n c = zip(*numpy.histogram(ar, bins=10))\n thr = int(ar.size/8.)\n mi, ma = ar.min(), ar.max()\n for (i,j) in c:\n if i>thr:\n mi = j\n break\n for (i,j) in c[::-1]:\n if i>thr:\n ma = j\n break\n return mi, ma\n\n def stripZeros(a, aa, b, bb):\n z = numpy.array([a, aa[:-1], b, bb[:-1]])\n til = 0\n for i,j in enumerate(a):\n if j>0:\n til = i\n break\n z = z[:, til:]\n til = 0\n for i,j in enumerate(b[::-1]):\n if j>0:\n til = i\n break\n z = z[:, :len(a)+1-til]\n a, aa, b, bb = z\n return a, numpy.append(aa, 100), b, numpy.append(bb, 100)\n\n K = len(u_kln)\n yy = []\n for k in range(0, K-1):\n upto = min(N_k[k], N_k[k+1])\n righ = -u_kln[k,k+1, : upto]\n left = u_kln[k+1,k, : upto]\n min1, max1 = findOptimalMinMax(righ)\n min2, max2 = findOptimalMinMax(left)\n\n mi = min(min1, min2)\n ma = max(max1, max2)\n\n (counts_l, xbins_l) = numpy.histogram(left, bins=num_bins, range=(mi, ma))\n (counts_r, xbins_r) = numpy.histogram(righ, bins=num_bins, range=(mi, ma))\n\n counts_l, xbins_l, counts_r, xbins_r = stripZeros(counts_l, xbins_l, counts_r, xbins_r)\n counts_r, xbins_r, counts_l, xbins_l = stripZeros(counts_r, xbins_r, counts_l, xbins_l)\n\n with numpy.errstate(divide='ignore', invalid='ignore'):\n log_left = numpy.log(counts_l) - 0.5*xbins_l[:-1]\n log_righ = numpy.log(counts_r) + 0.5*xbins_r[:-1]\n diff = log_left - log_righ\n yy.append((xbins_l[:-1], diff))\n\n plotdg_vs_dU(yy, df_allk, ddf_allk)\n return", "def diagnosticos(): \r\n global rhoe,Ex,npuntos_malla,itiempo,longitud_malla,rho0,aP,v1,v2,F\r\n 
global EnergiaK, EnergiaP, EnergiaT, emax\r\n global iout,igrafica,ifase,ivdist, distribucion\r\n global Archivos_Densidades, Archivos_Campo, Archivos_Efase, Archivos_Fdistribucion\r\n \r\n # Se crea el eje para graficar las cantidades fisicas involucradas:\r\n xgrafica = dx * sp.arange(npuntos_malla+1)\r\n \r\n if (itiempo == 0): \r\n plt.figure('Cantidades')\r\n plt.clf()\r\n \r\n if (igrafica > 0):\r\n # Se grafica cada paso dado por el contador igrafica:\r\n if (sp.fmod(itiempo,igrafica) == 0): \r\n # Densidad total\r\n plt.figure(1)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, -(rhoe+rho0), 'r', label='Densidad')\r\n plt.xlabel('x')\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(loc=1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_densidad'%(5, itiempo)\r\n Archivos_Densidades[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n # Campo electrico\r\n plt.figure(2)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, Ex, 'b' , label = 'Ex')\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('Ex', fontsize = 18)\r\n plt.xticks(np.linspace(0,16,4), fontsize = 18)\r\n plt.yticks(np.linspace(-0.0010,0.0010,5), fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-0.0015,0.0015)\r\n plt.legend(loc = 1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_campoelectrico'%(5, itiempo)\r\n Archivos_Campo[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n if (ifase > 0):\r\n if (sp.fmod(itiempo,ifase) == 0): \r\n # Se grafica el espacio de fase en el paso dado por el contador ifase:\r\n plt.figure(3)\r\n if (itiempo >0 ): plt.cla()\r\n v1 = sp.zeros(nparticulas)\r\n v2 = sp.zeros(nparticulas)\r\n x1 = sp.zeros(nparticulas)\r\n x2 = sp.zeros(nparticulas)\r\n for i in range(nparticulas):\r\n if (v[i-1]>v[i]):\r\n v1[i]=v[i]\r\n x1[i]=x[i]\r\n elif(v[i-1]<v[i]):\r\n v2[i]=v[i]\r\n x2[i]=x[i] \r\n if(distribucion == 0):\r\n plt.scatter(x,v,marker='.',s=0.1,color='black') \r\n elif(distribucion == 1 or distribucion == 2):\r\n plt.scatter(x1,v1,marker='.',s=0.1,color='red') \r\n plt.scatter(x2,v2,marker='.',s=0.1,color='blue')\r\n plt.xticks(np.linspace(0,100,6), fontsize = 18)\r\n plt.yticks(np.linspace(-8,8,5), fontsize = 18)\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('v', fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-4,8)\r\n\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_espaciofase'%(5, itiempo)\r\n Archivos_Efase[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=240)\r\n \r\n if (ivdist > 0):\r\n if (sp.fmod(itiempo,ivdist)==0):\r\n plt.figure(4)\r\n if (itiempo >0 ): plt.cla() \r\n plt.scatter(v,F,marker = '.' 
, s=0.1, color ='green')\r\n plt.xlim(-5*vh,5*vh)\r\n plt.ylim(0,1.0)\r\n plt.xlabel('v')\r\n plt.ylabel('f(v)')\r\n #fn_vdist = 'vdist_%0*d'%(5, itiempo)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_fdistribucion'%(5, itiempo)\r\n Archivos_Fdistribucion[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n #Se escriben los datos de la distribucion en un archivo:\r\n# sp.savetxt(fn_vdist, sp.column_stack((v,F)),fmt=('%1.4e','%1.4e')) \r\n \r\n # Energia cinetica:\r\n v2 = v**2\r\n EnergiaK[itiempo] = 0.5*masa*sum(v2)\r\n \r\n # Energia potencial:\r\n e2 = Ex**2\r\n EnergiaP[itiempo] = 0.5*dx*sum(e2)\r\n emax = max(Ex) # Campo maximo para analisis de inestabilidad\r\n \r\n # Energia total: \r\n EnergiaT[itiempo] = EnergiaP[itiempo] + EnergiaK[itiempo]\r\n \r\n return True", "def plot_likelihood(par_num, par_rng):\n\n likelihoods = np.load('data%s_RM.npy' % (par_num))\n\n plt.figure()\n plt.plot(par_rng, likelihoods, 'bo-')\n plt.xlabel('Value Mapped')\n plt.ylabel('Log(Likelihood)')\n plt.title('Likelihood Function of Parameter %s: %s'\n % (par_num, hammu12.jf12_parameter_names[par_num]))\n plt.minorticks_on()\n plt.savefig('fig%s_RM.png' % (par_num))\n plt.close()", "def plot_pretty():\n\n ts, ys, lin_model, K, us, dt_control, biass, end_time = simulate()\n plt.style.use('seaborn-deep')\n\n black = '#2B2B2D'\n red = '#E90039'\n orange = '#FF1800'\n white = '#FFFFFF'\n yellow = '#FF9900'\n\n plt.figure(figsize=(12.8, 9.6))\n plt.rcParams.update({'font.size': 16, 'text.color': white, 'axes.labelcolor': white,\n 'axes.edgecolor': white, 'xtick.color': white, 'ytick.color': white})\n\n plt.gcf().set_facecolor(black)\n\n plt.subplot(2, 3, 1)\n plt.plot(ts, ys[:, 2], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[1], color=white)\n plt.title(r'$C_{FA}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 2)\n plt.plot(ts, ys[:, 0], color=orange)\n plt.axhline(lin_model.yd2n(K.ysp)[0], color=white)\n plt.title(r'$C_{G}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 3)\n plt.plot(ts, ys[:, 3], color=orange)\n plt.title(r'$C_{E}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 4)\n plt.plot(ts, us[:, lin_model.inputs[1]], color=red)\n plt.title(r'$F_{m, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 5)\n plt.plot(ts, us[:, lin_model.inputs[0]], color=red)\n plt.title(r'$F_{G, in}$')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n plt.subplot(2, 3, 6)\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 1],\n color=red\n )\n plt.plot(\n numpy.arange(dt_control, end_time, dt_control),\n biass[:, 0],\n color=yellow\n )\n plt.legend([r'$C_{FA}$', r'$C_G$'], facecolor=black)\n plt.title('bias')\n plt.xlim([0, ts[-1]])\n plt.gca().set_facecolor(black)\n\n # plt.suptitle('Closedloop bioreactor without noise')\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('no_noise_pretty.png', transparent=True)\n plt.show()", "def plot(self, axis = None, xlabel=True, save=True, legend=True):\n if axis is None:\n fig, axis = plt.subplots(figsize=(10,7))\n # total kernel evaluations\n kernel_evaluations = 0.5 * self.Ms*(self.Ms - 1)\n effective_R = self.minimal_R * kernel_evaluations.reshape(-1,1,1)\n\n exponents = np.zeros(len(self.epsilons))\n\n for i, eps in enumerate(self.epsilons):\n means = 
np.mean(effective_R[:,:self.estimations,i],axis=-1)\n upper = np.quantile(effective_R[:,:self.estimations,i],upper_percentile,axis=-1)\n lower = np.quantile(effective_R[:,:self.estimations,i],lower_percentile,axis=-1)\n errors = np.array([means - lower, upper - means])\n\n p = np.polyfit(np.log(self.Ms), np.log(means), 1)\n exponents[i] = p[0]\n axis.errorbar(self.Ms, means, yerr=errors, marker='.', ecolor=colors[i], elinewidth=3., ls='',\n capsize=6,capthick=2., color=colors[i], ms=20, label = r'$\\varepsilon_0 = {{%s}}, \\quad R \\propto M^{{%.2f}}$'%(eps, p[0]))\n\n M_fine = np.geomspace(np.min(self.Ms),np.max(self.Ms))\n\n axis.plot(M_fine, np.exp(p[1])*M_fine**p[0],'-.',color=colors[i])\n\n axis.set_xscale('log')\n axis.set_yscale('log')\n axis.grid()\n if legend:\n axis.legend()\n if xlabel:\n axis.set_xlabel(r'Data size $M$')\n axis.set_ylabel(r'Total number of shots $R$')\n axis.set_xticks(self.Ms, self.Ms)\n axis.set_title(r'$\\lambda = {{%s}}$'%(1/self.C))\n #plt.show()\n sep = 'separable' if self.margin > 0 else 'overlap'\n if save:\n plt.savefig(f'plots/binomial_experiment_{sep}_C_{self.C}.png',dpi=300,bbox_inches='tight')\n \n return exponents", "def plot_JN_curve(self):\n x = bb_source.h * self.approx_JN_curve()[0] / (bb_source.k * self.T)\n y = self.approx_JN_curve()[1]\n fig, ax = plt.subplots()\n plt.loglog(x, y)\n plt.xlabel('$log(hf/kT)$')\n plt.ylabel('$log(P)$ in $10^{-12} \\cdot log(W)$')\n plt.yticks(y[0:self.num_bins:30], [\"%.3f\" % z for z in y[0:self.num_bins:30]*10**12])\n ax = plt.gca()\n plt.show()", "def plot(self, dis_type,diameter=\"*\",thickness=\"*\", loglog=False):\n if dis_type not in self.dis_types:\n print(\"Type %s does not exist, please check it\" % dis_type)\n return\n if diameter != \"*\" and (diameter not in self.diameters):\n print(\"Diameter %s does not exist, please check it\" % diameter)\n return\n if thickness != \"*\" and (thickness not in self.thicknesses):\n print(\"thickness %s does not exist, please check it\" % thickness)\n return\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title('%s' % self.plotTypes[dis_type])\n if diameter != \"*\":\n if thickness != \"*\":\n ax.set_title('%s , diameter = %s nm, thickness = %s nm' % (self.plotTypes[dis_type],diameter,thickness))\n else:\n ax.set_title('%s , diameter = %s nm' % (self.plotTypes[dis_type],diameter))\n \n if (thickness != \"*\" and diameter == \"*\"):\n ax.set_title('%s , thickness = %s nm' % (self.plotTypes[dis_type],thickness))\n\n for diam in sorted(self.distrs[dis_type]):\n if (diam==diameter and diameter!=\"*\") or diameter==\"*\":\n for thick in sorted(self.distrs[dis_type][diam]):\n if (thick==thickness and thickness!=\"*\") or thickness==\"*\":\n d = self.distrs[dis_type][diam][thick]\n if thickness==\"*\" and diameter==\"*\":\n lb = \" d= %s nm, t= %s nm\" % (diam,thick)\n else:\n if diameter==\"*\":\n lb = \"d= %s nm\" % (diam)\n else:\n lb = \"t= %s nm\" % (thick)\n ax.plot(d.x, d.y, label=lb)\n \n ax.legend(numpoints=1,loc=4)\n ax.grid(True)\n # Here we need to explicity say to show the plot\n plt.show()", "def plot4(self, plog=False):\n\n probs = pd.read_csv(self.probfile)\n\n plt.rc('font', size=14)\n fig, ax = plt.subplots()\n plt.plot(self.ds.freq, self.snr, 'k-', alpha=0.5, zorder=1)\n\n # plot the SNR range to search across when finding snr_modes\n for idx, line in enumerate(self.ds.mode_id['f0']):\n w = np.exp(self.ds.mode_id['w0'][idx])\n plt.axvline(x=line-w, color='b', linestyle='-', alpha=0.4)\n plt.axvline(x=line+w, color='b', linestyle='-', 
alpha=0.4)\n\n # overplot the predicted SNR values at the modes\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs', alpha=1, zorder=2)\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr', alpha=1, zorder=3)\n plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days', alpha=1, zorder=4)\n\n if plog:\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel(r'$\\nu$ / $\\rm \\mu Hz$')\n plt.ylabel(r'SNR')\n\n mn = min(star.ds.mode_id['f0']) -\\\n (max(star.ds.mode_id['f0'])-min(star.ds.mode_id['f0']))/7.\n mx = max(star.ds.mode_id['f0']) +\\\n (max(star.ds.mode_id['f0'])-min(star.ds.mode_id['f0']))/7.\n plt.xlim([mn,mx])\n\n plt.legend()\n plt.title('KIC ' + str(self.ds.epic))\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'plot4_SNR' + self.ds.epic + '.pdf')", "def fig_TF(f, day_trfs, day_list, sta_trfs, sta_list, skey=''):\n\n import matplotlib.ticker as mtick\n\n # Extract only positive frequencies\n faxis = f > 0\n\n # Get max number of TFs to plot\n ntf = max(sum(day_list.values()), sum(sta_list.values()))\n\n # Define all possible compbinations\n tf_list = {'ZP': True, 'Z1': True, 'Z2-1': True,\n 'ZP-21': True, 'ZH': True, 'ZP-H': True}\n\n if ntf == 1:\n fig = plt.figure(figsize=(6, 1.75))\n else:\n fig = plt.figure(figsize=(6, 1.33333333*ntf))\n\n j = 1\n for key in tf_list:\n\n if not day_list[key] and not sta_list[key]:\n continue\n\n ax = fig.add_subplot(ntf, 1, j)\n\n if day_list[key]:\n for i in range(len(day_trfs)):\n ax.loglog(\n f[faxis],\n np.abs(day_trfs[i][key]['TF_'+key][faxis]),\n 'gray', lw=0.5)\n if sta_list[key]:\n ax.loglog(\n f[faxis],\n np.abs(sta_trfs[key]['TF_'+key][faxis]),\n 'k', lw=0.5)\n if key == 'ZP':\n ax.set_ylim(1.e-20, 1.e0)\n ax.set_xlim(1.e-4, 2.5)\n ax.set_title(skey+' Transfer Function: ZP',\n fontdict={'fontsize': 8})\n elif key == 'Z1':\n ax.set_ylim(1.e-5, 1.e5)\n ax.set_xlim(1.e-4, 2.5)\n ax.set_title(skey+' Transfer Function: Z1',\n fontdict={'fontsize': 8})\n elif key == 'Z2-1':\n ax.set_ylim(1.e-5, 1.e5)\n ax.set_xlim(1.e-4, 2.5)\n ax.set_title(skey+' Transfer Function: Z2-1',\n fontdict={'fontsize': 8})\n elif key == 'ZP-21':\n ax.set_ylim(1.e-20, 1.e0)\n ax.set_xlim(1.e-4, 2.5)\n ax.set_title(skey+' Transfer Function: ZP-21',\n fontdict={'fontsize': 8})\n elif key == 'ZH':\n ax.set_ylim(1.e-10, 1.e10)\n ax.set_xlim(1.e-4, 2.5)\n ax.set_title(skey+' Transfer Function: ZH',\n fontdict={'fontsize': 8})\n elif key == 'ZP-H':\n ax.set_ylim(1.e-20, 1.e0)\n ax.set_xlim(1.e-4, 2.5)\n ax.set_title(skey+' Transfer Function: ZP-H',\n fontdict={'fontsize': 8})\n\n j += 1\n\n ax.set_xlabel('Frequency (Hz)')\n plt.tight_layout()\n\n return plt" ]
[ "0.620244", "0.59672105", "0.5958095", "0.5921367", "0.58973336", "0.588664", "0.5876698", "0.58317745", "0.57675505", "0.5748258", "0.5699336", "0.5636468", "0.5604143", "0.5548979", "0.55203366", "0.5516201", "0.55131924", "0.5496997", "0.5495823", "0.54898053", "0.54882056", "0.54868346", "0.54757726", "0.5429623", "0.54188323", "0.5401828", "0.539508", "0.53834605", "0.5382056", "0.5370025" ]
0.62799585
0
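Each record's metadata (the objective field visible in the entries that follow) pairs query, document, and negatives as a triplet objective. A minimal sketch, assuming each record is loaded as a Python dict with exactly those three field names, of expanding one record into (anchor, positive, negative) training triplets:

def record_to_triplets(record):
    # One (anchor, positive, negative) triplet per hard negative, following the
    # query/document/negatives pairing declared in the record's triplet objective.
    anchor = record["query"]
    positive = record["document"]
    return [(anchor, positive, negative) for negative in record["negatives"]]

# Toy call with field contents truncated for brevity:
triplets = record_to_triplets({
    "query": "parse the product detail page.",
    "document": "def parse_detail(self, response): ...",
    "negatives": ["def parse_product(self, response): ..."],
})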
Function to compute the mean curve for the differential elastic modulus for all the data within a file. Note that it is based on interpolation! INPUT
def mean_kall_interp(filename, xvariable,num_interp = 100, show_plot = True, sample_header = 'Sample Description', stress_header = 'Stress (Pa)', strain_header = 'Strain (%)', k_header = 'K prime (Pa)', sep = ',', dec = '.'): # Read data and get all the samples within the data frame data = pd.read_csv(filename, sep = sep, decimal = dec) all_samples = data[sample_header].unique() # Define which dependent variable to extract if 'stress' in xvariable: xvar = stress_header elif 'strain' in xvariable: xvar = strain_header # Loop to get mean values of minimum and maximum xdata for the samples xmin = []; xmax = [] for isample in all_samples: data_sample = data.loc[data[sample_header] == isample] xsample = np.array(data_sample[xvar]) xmin.append(np.min(xsample)) xmax.append(np.max(xsample)) xmin_avg = np.mean(np.array(xmin)) xmax_avg = np.mean(np.array(xmax)) xmax_std = np.std(np.array(xmax)) print('Rupture: ', xmax_avg, '+/-', xmax_std) # Build interpolation vector xmin_log = np.log10(xmin_avg) xmax_log = np.log10(xmax_avg) xinterp = np.logspace(xmin_log, xmax_log, num = num_interp) #Loop to get the interpolated curves for each sample within the file k_all = [] for isample in all_samples: data_sample = data.loc[data[sample_header] == isample] xsample = data_sample[xvar] ksample = data_sample[k_header] k_interp = np.interp(xinterp, xsample, ksample) k_all.append(k_interp) k_all = np.array(k_all) kmean = np.mean(k_all, axis = 0) kstd = np.std(k_all, axis = 0) # Plot the average curve and standard deviation, if desired if show_plot == True: plt.fill_between(xinterp, kmean - kstd, kmean + kstd, color = 'lightgray', alpha = 0.8) plt.plot(xinterp, kmean, c = 'darkgray', marker = 'o', mfc = 'w') plt.ylabel('$K\'$ (Pa)') plt.xlabel(xvar) plt.loglog() return [xinterp, kmean, kstd]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMeanE(self):\n\n\n\t\tEBefore, EAfter = self.getEnergyEvolution()\n\n\t\tmeanBefore = np.mean(EBefore[-self.__Nkicks//5:])\n\t\tmeanAfter = np.mean(EAfter[-self.__Nkicks//5:])\n\t\tmeanTot = (meanBefore+meanAfter)/2\n\n\t\treturn meanBefore, meanAfter, meanTot", "def ex1_get(alpha,beta,pace,delta):\n\t\n\tfilename = seed+\"/ex_sim_a\"+str(alpha)+\"_p\"+str(pace)+\"_d\"+str(delta)+\".tmp\"\n\t\t\n\t# get the avg_energy vector\n\tavg_energy = [0]*(T+1)\n\tfile = open(filename,'r')\n\tfor _ in range(K):\n\t\tfile.readline() # the first line contains T\n\t\tfor t in range(T+1):\n\t\t\te_t = float(file.readline().split()[0]) # e_t is the 1st value\n\t\t\tavg_energy[t] += e_t/K\n\n\treturn avg_energy", "def ewm(dataArray):\r\n\r\n # normalized = np.zeros(dataArray.shape)\r\n starting_means = np.mean(dataArray[:init_block_size])\r\n starting_var = np.var(dataArray[:init_block_size])\r\n averages = np.copy(starting_means)\r\n variances = np.copy(starting_var)\r\n\r\n for i in range(0, len(dataArray)):\r\n # for the first samples, there are not enough previous samples to warrant an exponential weighted averaging\r\n # simply substract the true average of the first samples\r\n if i < init_block_size:\r\n dataArray[i] = (dataArray[i] - starting_means) / np.maximum(eps, np.sqrt(starting_var))\r\n else:\r\n #update the rolling mean and variance\r\n averages = 0.999 * averages + 0.001 * dataArray[i]\r\n variances = 0.999 * variances + 0.001 * (np.square(dataArray[i] - averages))\r\n\r\n dataArray[i] = (dataArray[i] - averages) / np.maximum(eps, np.sqrt(variances)) \r\n\r\n return dataArray", "def enstrophy_average(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3): # vorticity-3 component\n #---------------------------------------------------------------------#\n # Defining the domain variables #\n #---------------------------------------------------------------------#\n dim = omega1.shape\n time = dim[-1]\n avg = np.zeros(time)\n #---------------------------------------------------------------------#\n # Looping over the time variable #\n #---------------------------------------------------------------------#\n print_count = 51\n for i in range(0, time):\n term1 = np.square(omega1[:,:,:,i])\n term2 = np.square(omega2[:,:,:,i])\n term3 = np.square(omega3[:,:,:,i])\n enst = 0.5*(term1 + term2 + term3)\n avg[i] = np.mean(enst)\n #-----------------------------------------------------------------#\n # Printing statement #\n #-----------------------------------------------------------------#\n if print_count > 20:\n print('Enstrophy average ---> t_step = %i' %(i))\n print_count = 0\n print_count += 1\n\n return avg", "def energy(data):\n\n return np.real(np.mean(np.abs(data)**2, axis=1))", "def calculate_meanpT_fluc(dN_array, pT_array, pT_min=0.0, pT_max=3.0):\n npT_interp = 50\n pT_inte_array = linspace(pT_min, pT_max, npT_interp)\n\n nev, npT = dN_array.shape\n mean_pT_array = zeros(nev)\n for iev in range(nev):\n dN_interp = exp(interp(pT_inte_array, pT_array[iev, :],\n log(dN_array[iev, :] + 1e-30)))\n mean_pT_array[iev] = (sum(pT_inte_array**2.*dN_interp)\n /sum(pT_inte_array*dN_interp))\n\n # compute the error using jack-knife\n rn_array = zeros(nev)\n for iev in range(nev):\n array_idx = [True]*nev\n array_idx[iev] = False\n array_idx = array(array_idx)\n rn_ev = (std(mean_pT_array[array_idx])\n /(mean(mean_pT_array[array_idx]) + 1e-15))\n rn_array[iev] = rn_ev\n rn_mean = mean(rn_array, axis=0)\n rn_err = sqrt((nev - 1.)/nev*sum((rn_array - rn_mean)**2.))\n 
return([rn_mean, rn_err])", "def calculate_mean(data_dir):\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n all_data = []\n for num_data in data:\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data') \n all_data.append(data)\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data", "def m_average_fun(self, dx=df.dx):\n\n mx = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)\n my = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)\n mz = df.assemble(\n self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)\n volume = df.assemble(self.material._Ms_dg * dx)\n\n return np.array([mx, my, mz]) / volume", "def _get_mean(self):\n return self._get_conditional_negative_energy()", "def calculate_mean_pole(verbose=False, mode=0o775):\n #-- download the IERS pole coordinates file from remote servers\n FILE = 'eopc01.1900-now.dat'\n try:\n remote_buffer = pull_pole_coordinates(FILE, verbose=verbose)\n except:\n return\n\n #-- read contents from input file object\n file_contents = remote_buffer.read().decode('utf-8').splitlines()\n header = file_contents[0][1:].split()\n nlines = len(file_contents) - 1\n data = {h:np.zeros((nlines)) for h in header}\n #-- extract data for all lines\n for i,line in enumerate(file_contents[1:]):\n line_contents = line.split()\n for h,l in zip(header,line_contents):\n data[h][i] = np.float(l)\n #-- output mean pole coordinates\n xm = np.zeros((nlines))\n ym = np.zeros((nlines))\n #-- output file with mean pole coordinates\n LOCAL = pyTMD.utilities.get_data_path(['data','mean-pole.tab'])\n fid = open(LOCAL,'w')\n print(LOCAL) if verbose else None\n for i,T in enumerate(data['an']):\n #-- mean pole is Gaussian Weight of all dates with a = 3.40 years.\n Wi = np.exp(-0.5*((data['an']-T)/3.4)**2)\n xm[i] = np.sum(Wi*data['x(\")'])/np.sum(Wi)\n ym[i] = np.sum(Wi*data['y(\")'])/np.sum(Wi)\n print('{0:6.2f} {1:11.7f} {2:11.7f}'.format(T,xm[i],ym[i]),file=fid)\n #-- close the output file\n fid.close()\n #-- change the permissions mode of the output mean pole file\n os.chmod(LOCAL, mode)", "def Mean(data):\n return data.mean()", "def simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A):\n\tfilenames = glob.glob(grbdir + \"/MM_out/*\")\n\tbadpixfile = glob.glob(grbdir + \"/*badpix.fits\")[0]\n\tfilenames.sort()\n\tpix_cnts = np.zeros((16384,len(filenames)))\n\terr_pix_cnts = np.zeros((16384,len(filenames)))\n\ten = np.arange(5, 261., .5)\n\tsel = (en>=100) & (en <= 150)\n\ten_range = np.zeros(len(filenames))\n\tfor f in range(len(filenames)):\n\t\ten_range[f] = filenames[f][20:26]\n\terr_100_500 = (100.0 <= en_range.astype(np.float)) & (en_range.astype(np.float) <= 500.0)\n\terr_500_1000 = (500.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 1000.0)\n\terr_1000_2000 = (1000.0 < en_range.astype(np.float)) & (en_range.astype(np.float) <= 2000.0)\n\texist_1000_2000 = np.where(err_1000_2000 == True)\n\tE = np.array([])\n\t\n\tprint \"Indices where energy is in between 1000 and 2000 :\",exist_1000_2000[0]\n\t\n\tfor i,f in enumerate(filenames):\n\t\t\tdata = fits.getdata(f + \"/SingleEventFile.fits\")\n\t\t\tE = np.append(E, float(f[20:26]))\n\t\t\terror = np.sqrt(data) \n\t\t\tdata[:,~sel] = 0.\n\t\t\terror[:,~sel] = 0.\n\t\t\tpix_cnts[:,i] = data.sum(1)*model(E[i], alpha, beta, E0, A,typ)/55.5\n\t\t\terr_pix_cnts[:,i] = np.sqrt(((error*model(E[i], alpha, beta, 
E0, A,typ)/55.5)**2).sum(1))\t\t\n\t\t\t\n\tpix_cnts_total = np.zeros((16384,))\n\terr_100_500_total = np.sqrt((err_pix_cnts[:,err_100_500]**2).sum(1))*(E[err_100_500][1]-E[err_100_500][0])\n\terr_500_1000_total = np.sqrt((err_pix_cnts[:,err_500_1000]**2).sum(1))*(E[err_500_1000][1]-E[err_500_1000][0])\n\n\tif (len(exist_1000_2000[0]) != 0):\n\t\terr_1000_2000_total = np.sqrt((err_pix_cnts[:,err_1000_2000]**2).sum(1))*(E[err_1000_2000][1]-E[err_1000_2000][0])\n\telse :\n\t\terr_1000_2000_total = 0\n\t\n\terr_pix_cnts_total = np.sqrt(err_100_500_total**2 + err_500_1000_total**2 + err_1000_2000_total**2) # dE is 5 from 100-500, 10 from 500-1000, 20 from 1000-2000\n\n\tfor i in range(16384):\n\t\t\tpix_cnts_total[i] = simps(pix_cnts[i,:], E)\t\t\t\n\n\tquad0pix = pix_cnts_total[:4096]\n\tquad1pix = pix_cnts_total[4096:2*4096]\n\tquad2pix = pix_cnts_total[2*4096:3*4096]\n\tquad3pix = pix_cnts_total[3*4096:]\n\t\t\n\terr_quad0pix = err_pix_cnts_total[:4096]\n\terr_quad1pix = err_pix_cnts_total[4096:2*4096]\n\terr_quad2pix = err_pix_cnts_total[2*4096:3*4096]\n\terr_quad3pix = err_pix_cnts_total[3*4096:]\n\t\n\tquad0 = np.reshape(quad0pix, (64,64), 'F')\n\tquad1 = np.reshape(quad1pix, (64,64), 'F')\n\tquad2 = np.reshape(quad2pix, (64,64), 'F')\n\tquad3 = np.reshape(quad3pix, (64,64), 'F')\n\t\t\n\terr_quad0 = np.reshape(err_quad0pix, (64,64), 'F')\n\terr_quad1 = np.reshape(err_quad1pix, (64,64), 'F')\n\terr_quad2 = np.reshape(err_quad2pix, (64,64), 'F')\n\terr_quad3 = np.reshape(err_quad3pix, (64,64), 'F')\n\t\n\tsim_DPH = np.zeros((128,128), float)\n\tsim_err_DPH = np.zeros((128,128), float)\n\t\n\tsim_DPH[:64,:64] = np.flip(quad0, 0)\n\tsim_DPH[:64,64:] = np.flip(quad1, 0)\n\tsim_DPH[64:,64:] = np.flip(quad2, 0)\n\tsim_DPH[64:,:64] = np.flip(quad3, 0)\n\t\n\tsim_err_DPH[:64,:64] = np.flip(err_quad0, 0)\n\tsim_err_DPH[:64,64:] = np.flip(err_quad1, 0)\n\tsim_err_DPH[64:,64:] = np.flip(err_quad2, 0)\n\tsim_err_DPH[64:,:64] = np.flip(err_quad3, 0)\n\n\tbadpix = fits.open(badpixfile)\n\tdphmask = np.ones((128,128))\n\t\n\tbadq0 = badpix[1].data # Quadrant 0\n\tbadpixmask = (badq0['PIX_FLAG']!=0)\n\tdphmask[(63 - badq0['PixY'][badpixmask]) ,badq0['PixX'][badpixmask]] = 0\n\n\tbadq1 = badpix[2].data # Quadrant 1\n\tbadpixmask = (badq1['PIX_FLAG']!=0)\n\tdphmask[(63 - badq1['PixY'][badpixmask]), (badq1['PixX'][badpixmask]+64)] = 0\n\n\tbadq2 = badpix[3].data # Quadrant 2\n\tbadpixmask = (badq2['PIX_FLAG']!=0)\n\tdphmask[(127 - badq2['PixY'][badpixmask]), (badq2['PixX'][badpixmask]+64)] = 0\n\n\tbadq3 = badpix[4].data # Quadrant 3\n\tbadpixmask = (badq3['PIX_FLAG']!=0)\n\tdphmask[(127 - badq3['PixY'][badpixmask]), badq3['PixX'][badpixmask]] = 0\n\t\t\t\n\toneD_sim = (sim_DPH*dphmask).flatten()\n\n\treturn oneD_sim*t_src,sim_DPH*t_src,dphmask,sim_err_DPH*t_src", "def get_mean(self):\n mean = np.array(np.zeros((4,8)))\n for i,c in enumerate(self.cellLines):\n for j,l in enumerate(self.ligands):\n mean[i][j] = self.aucs[c][l]['mean']\n return mean", "def calc_mean(a, b, c, d, e):\n return (a + b + c + d + e) / 5", "def test_basic_file_mean(self):\n index = pds.date_range(*self.bounds1)\n names = [''.join((date.strftime('%Y-%m-%d'), '.nofile'))\n for date in index]\n self.testInst.bounds = (names[0], names[-1])\n ans = avg.mean_by_file(self.testInst, 'dummy4')\n assert np.all(ans == 86399 / 2.0)\n\n return", "def calccalmean(self,blk):\n calind=self.getcalind(blk)\n x=self.spec[calind,:]\n return np.nanmean(x,axis=0)", "def mean(self):\n\t\treturn 0. 
#obtained by integrating 1.5x^3 from -1 to 1", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def mean(self) -> float:\n points = np.concatenate(\n [\n [self.t_min],\n -np.logspace(-5, -1, 5)[::-1],\n np.logspace(-5, -1, 5),\n [self.t_max],\n ]\n )\n\n mean = 0.0\n for left, right in zip(points[:-1], points[1:]):\n integral, _ = integrate.quad(self.cdf, left, right, limit=500)\n mean += right * self.cdf(right) - left * self.cdf(left) - integral\n\n return mean", "def bivariate_mean(x, y, pdf):\n\n if pdf.shape[0] != x.shape[0] or pdf.shape[1] != y.shape[0]:\n print(\"Error, mesh size does not match x and y\")\n n_x = x.shape[0]\n n_y = y.shape[0]\n mean_int_x, mean_int_y = 0.0, 0.0\n p_of_x, p_of_y = np.zeros(n_x), np.zeros(n_y)\n for i in range(0, n_x):\n for j in range(1, n_y):\n delta_y = y[j] - y[j - 1]\n p_of_x[i] += delta_y / 2.0 * (pdf[i, j] + pdf[i, j - 1])\n if i > 0:\n delta_x = x[i] - x[i - 1]\n mean_int_x += delta_x / 2.0 * (x[i] * p_of_x[i] + x[i - 1] * p_of_x[i - 1])\n\n for j in range(0, n_y):\n for i in range(1, n_x):\n delta_x = x[i] - x[i - 1]\n p_of_y[j] += delta_x / 2.0 * (pdf[i, j] + pdf[i - 1, j])\n if j > 0:\n delta_y = y[j] - y[j - 1]\n mean_int_y += delta_y / 2.0 * (y[j] * p_of_y[j] + y[j - 1] * p_of_y[j - 1])\n\n return mean_int_x, mean_int_y", "def disk_average(self, var, r_lim):\n # change the mask for the one in Flux\n npx = self.params['npx']\n npy = self.params['npy']\n npz = self.params['npz']\n number_domains = npx*npy*npz # so far only works for number_domains<100\n Lx = self.params['Lx']\n Ly = self.params['Ly']\n Lz = self.params['Lz']\n x0 = Lx/2 # center point in the x domain.\n y0 = Ly/2 # center point in the y domain.\n nz = self.params['nz']\n\n if var == 'NN': # maybe interpolate is field...\n nz = nz - 1\n\n t = self.read_vars(['t'])['t']\n n_time = t.shape[0]\n\n r_max = r_lim #0.45 # as in forced_plume_nudging.py\n z_max = 0.95\n\n means = np.zeros((n_time, nz))\n\n fields = self.read_vars([var, 'x', 'y'])\n\n if var in ['u', 'v', 'w']:\n axis_vel = {'u': 3, 'v': 2, 'w':1}\n fields[var] = velocity_interpolation(fields[var], axis=axis_vel[var])\n\n XX, YY = np.meshgrid(fields['x']/Lx - 0.5,\n fields['y']/Ly - 0.5)\n\n r = np.sqrt(XX**2 + YY**2)\n mask = ma.masked_outside(r, 0, r_max)\n #mask_2 = ma.masked_outside(ZZ, 0, z_max)\n\n for t in range(n_time):\n for z_lvl in range(nz):\n field_new = ma.masked_array(fields[var][t, z_lvl, :, :], mask.mask)\n means[t, z_lvl] = field_new.mean()\n\n #means = means/number_domains\n return means", "def calculate_mean_dark(data_dir):\n\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n \n all_data = []\n for num_data in data:\n #print(num_data)\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data')\n all_data.append(data)\n #print\n\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data", "def ex_sim_get(alpha,beta,pace,delta): \n\t\n\tfilename = seed+\"/ex_sim_a\"+str(alpha)+\"_p\"+str(pace)+\"_d\"+str(delta)+\".tmp\"\n\t\n\t# get the avg_energy and avg_overlap\n\tavg_energy = 0\n\tavg_overlap = 0\n\t\n\tfile = open(filename,'r')\n\tfor _ in range(K):\n\t\tdata = file.readline().split()\n\t\te_T,q_T = float(data[0]),float(data[1])\n\t\tavg_energy += e_T/K\n\t\tavg_overlap += q_T/K\n\t\n\treturn avg_energy, avg_overlap", "def _get_mean(self, sums, step):\n\n return sums/step", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n 
self.prom.append(np.mean(self.data[:,i]))", "def average(data):\n return np.average(data)", "def mae(t, y):\n\treturn mean_absolute_error(t, y)", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def d_mean(x, y):\n return (x + y) / 2", "def meanAdjustELE(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n\n Neq = np.eye(numZD,dtype=float) * 0.01\n Apart = np.zeros((numd,numZD))\n sd = np.zeros(numd)\n\n for i in range(0,numd):\n iz = np.floor(data[i,2]/zenSpacing)\n sd[i] = np.sin(data[i,2]/180.*np.pi)\n Apart[i,iz] = 1.#-(data[i,2]-iz*zenSpacing)/zenSpacing)\n\n prechi = np.dot(data[:,3].T,data[:,3])\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,data[:,3])\n Cov = np.linalg.pinv(Neq)\n \n Sol = np.dot(Cov,Bvec)\n \n postchi = prechi - np.dot(Bvec.T,Sol)\n \n pwl = Sol\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n model = np.dot(Apart,Sol)\n f = loglikelihood(data[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n stats['bic'] = bic\n\n return pwl,pwlsig,stats" ]
[ "0.6035233", "0.59857315", "0.5804135", "0.5775241", "0.57595766", "0.5643076", "0.56369853", "0.5624777", "0.560856", "0.56008756", "0.5565176", "0.5548366", "0.5544753", "0.55032843", "0.5495962", "0.5483974", "0.5464041", "0.54538137", "0.5447607", "0.5447298", "0.54315394", "0.54283285", "0.5426334", "0.5423978", "0.53932756", "0.5383605", "0.53806186", "0.5377893", "0.53497165", "0.53343904" ]
0.6105286
0
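A minimal usage sketch for the mean_kall_interp document above, with that function already in scope; the CSV file name is an assumption, and the column layout is taken from the function's own defaults ('Sample Description', 'Stress (Pa)', 'Strain (%)', 'K prime (Pa)'):

# Hypothetical input file; any CSV using the default column headers works.
x_interp, k_mean, k_std = mean_kall_interp("differential_modulus.csv", "stress",
                                            num_interp=100, show_plot=False)
# x_interp, k_mean and k_std each hold one value per interpolated stress point
# (100 here), with K' averaged across every sample in 'Sample Description'.
print(x_interp.shape, k_mean.shape, k_std.shape)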
parse the product detail page.
def parse_detail(self, response): loader = BeiBeiProductLoader(BeibeiProduct(), response=response) match = re.search(r'/detail/p/([0-9]+)\.html', response.url) if not match: self.logger.warn("product id not found from URL: %s", response.url) return product_id = int(match.group(1)) loader.add_value("id", product_id) loader.add_css("name", "h3.over-title::text") loader.add_value("category", response.meta["cate_name"]) loader.add_css("description", "p.over-memo::text") img_info = self.parse_images(response.text) for v in img_info.values(): loader.add_value("file_urls", v) loader.add_value("img_info", img_info) yield loader.load_item() yield from self.parse_sku(product_id, response.text, img_info)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_detail_page(self, response):\n self.logger.info('Parse Detail Page function called on %s', response.url)\n item = response.meta.get('item', {})\n item['url'] = response.url\n item['title'] = response.css(TITLE_SELECTOR).extract_first(\"\").strip()\n item['price'] = self.get_price(response)\n return item", "def parse_details(self, response):\n items = response.xpath(\"//*[@id='all']//div[@class='prdct-box']\")\n for i in items:\n image_url = response.urljoin(i.xpath(\".//div[@class='prdct-box1']/a[1]/@href\").get())\n description = i.xpath(\".//div[@class='prdct-box2']//a[1]/text()\").get()\n item_no = i.xpath(\".//div[@class='prdct-box2']//text()[3]\").get(default='').strip()\n upc = i.xpath(\".//*[contains(text(),'UPC')]/following-sibling::text()[1]\").extract()[0].strip()\n category = i.xpath(\"//*[@id='all']//*[@class='products']/text()\").get()\n case = i.xpath(\".//*[contains(text(),'Case')]/following-sibling::text()[1]\").extract()[0]\n yield {\n \"VENDORID\":1068,\n \"VENDOR\":'UPD',\n \"ITEMNO\":item_no,\n \"UPC\":upc,\n \"CATEGORY\":category,\n \"DESCRIPTION\":description,\n \"IMAGE_URL\":image_url,\n \"CASEPACK\":case,\n \"PAGE_TITLE\":response.css('title::text').get(),\n \"PAGE_URL\":response.request.url\n }\n\n next_page = response.xpath(\"//p[@class='page-num']//a/@href\").extract()\n if next_page is not None:\n for n in next_page:\n next_page_url = response.urljoin(n)\n yield scrapy.Request(next_page_url, callback=self.parse_details)", "def parse(self, response):\n\n product_page_links = response.css('.detailsLink')\n yield from response.follow_all(product_page_links, self.parse_item)\n\n pagination_links = response.css('span.fleft a')\n yield from response.follow_all(pagination_links, self.parse)", "def get_product_info(self, product):\n\n product_link = self.url + product.a['href']\n product_page = self.get_response(product_link)\n product_soup = BeautifulSoup(product_page.content, 'html.parser')\n\n # get product details\n product_brand = product_soup.find('h2').text.strip()\n product_name = product_soup.find('h1').text.strip()\n\n product_details = product_soup.find('div', id='z-pdp-detailsSection')\n\n product_attributes = []\n for detail_section in product_details.find_all('div', class_='h-container h-flex-no-shrink h-tabs__panel h-align-left'):\n for tag in detail_section.find_all('p'):\n product_attributes.append(tag.text.strip())\n\n # get product image\n product_img_thumbs = product_soup.find('div', id='z-pdp-topSection')\n product_img_thumbs = product_img_thumbs.find(\n 'div', class_='h-container h-carousel h-carousel-thumbnail vertical h-align-left')\n\n img_links = []\n product_img_link = ''\n for img_thumb in product_img_thumbs.find_all('picture'):\n img_link = img_thumb.find('img')['src'].replace('thumb', 'zoom')\n if 'packshot' in img_link:\n product_img_link = img_link\n else:\n img_links.append(img_link)\n\n # product_img_link = 'https:' + product_img.split('\"')[1].split('?')[0]\n product_img_id = product_img_link.split('/')[-1].split('@')[0]\n\n return {'name': product_name,\n 'brand': product_brand,\n 'id': product_img_id,\n 'img_url': product_img_link,\n 'model_img_urls': ', '.join(img_links),\n 'attributes': ', '.join(product_attributes)}", "def parse_product_page(self, response):\n item = response.meta['url']\n category = re.search('2Cvideo&q=(.*)', item).group(1)\n title = response.xpath(\"//div/div/div/div/h1/text()\").extract()\n date = response.xpath(\"//div/div/div/div/time/text()\").extract()\n cat = 
response.xpath(\"//div/div/div/div/span/text()\").extract()\n topic = response.css('p ::text').getall()\n topic = topic[:topic.index('Keywords')]\n image = response.xpath(\"//picture/img/@src\").extract()\n\n yield{'Magazine':\"Wmag\",\n 'title': title[0],\n 'date': date[0],\n 'label': cat,\n 'category': category,\n 'image': image,\n 'topic' : topic,\n\n }\n logging.info(\"processing \" + response.url)\n yield None", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('.pageTitle span::text').get()\n item['available'] = not response.css('.dispo')\n item['promotion'] = not not response.css('.old')\n if item['promotion']:\n item['old_price'] = response.css('.old .amount::text').get()\n\n item['price'] = response.css('.new .amount::text').get()\n # Un tableau\n item['raw_string'] = response.css('.featureTable tr td::text').getall()\n \n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['density'] = response.css('#description strong::text').get()\n item['available'] = not response.css('.msgSoldOut')\n\n\n for selector in response.css('.fpBktParam'):\n item['raw_string'] = selector.css('span::text').get()\n item['price'] = selector.css('div::text').getall()[1]\n yield item", "def parse_detail(self, response):\n\n self.logger.log(self.log_lvl, 'scraping data @ {}'.format(response.url))\n\n item_list = list()\n image_urls = list()\n # extract image\n try:\n pattern = re.compile(r\"(.*imagearray:)(.*)(,.*displaymode.*)\", re.MULTILINE | re.DOTALL)\n javascript_containing_images = response.xpath('//script[contains(., \"var mygallery=\")]/text()').extract()[0]\n images = re.match(pattern, javascript_containing_images).group(2)\n image_array = json.loads(images)\n image_urls = [urlparse.urljoin(response.url, itm[1]) for itm in image_array]\n except Exception as e:\n print(\"{} - {}\".format(type(e), str(e)))\n\n tipe_mobil = response.css('#content font.vehicleinfo ~ font.warning::text').extract_first()\n model_mobil = response.css('#content font.vehicleinfo::text').extract_first()\n if tipe_mobil.lower() == model_mobil.lower():\n tipe_mobil = response.meta.get('type', None)\n main_group = response.meta.get('main_group', None)\n assembly_set = response.css('#content font.title b::text').extract_first()\n\n # sparepart items\n for row in response.css('div#content div.content table tr'):\n item = IsuzuSparepartItem()\n\n # source_url\n item['source_url'] = response.url\n\n # car model\n item['merk'] = self.name\n item['tipe_mobil'] = tipe_mobil\n item['model_mobil'] = model_mobil\n\n # images\n item['image_urls'] = image_urls\n\n # grouping/assembly\n item['main_group'] = main_group\n item['assembly_set'] = assembly_set\n\n item['key'] = row.css('td.intable:nth-child(1) .detailcontent::text').extract_first()\n item['part_number'] = row.css('td.intable:nth-child(2) .detailcontent::text').extract_first()\n item['itc'] = row.css('td.intable:nth-child(3) .detailcontent::text').extract_first()\n item['description'] = row.css('td.intable:nth-child(4) .detailcontent::text').extract_first()\n item['qty'] = row.css('td.intable:nth-child(5) .detailcontent::text').extract_first()\n item['app_date'] = row.css('td.intable:nth-child(6) .detailcontent::text').extract_first()\n item['lr'] = row.css('td.intable:nth-child(7) 
.detailcontent::text').extract_first()\n item['model'] = row.css('td.intable:nth-child(8) .detailcontent::text').extract_first()\n item['remarks'] = row.css('td.intable:nth-child(9) .detailcontent::text').extract_first()\n\n item_list.append(item)\n\n return item_list", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n # item['product_name'] = response.css('h1::text').get()\n item['promotion'] = any(x == \"PROMOTION\" for x in response.css('h3::text').getall())\n item['description'] = response.css('.product__description p::text').getall()\n item['raw_string'] = response.css('.product__informations li::text').getall() # Array\n\n data = eval(response.css('script[type~=\"application/ld+json\"]::text').get())\n item['product_name'] = data['name']\n item['product_id'] = data['productID']\n item['price'] = data['offers']['price']\n item['currency'] = data['offers']['priceCurrency']\n item['available'] = data['offers']['availability'].endswith('InStock')\n item['latin_name'] = data['alternateName']\n return item", "def parse_product(self, response):\n # messy data\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.current-price span::attr(content)').get()\n \n # sometimes quantity and/or seed number\n item['raw_string'] = ' '.join(response.css('.product-information span::text').getall())\n\n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.price > span::text').get()\n\n\n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n # Réécriture de l'url\n item['url'] = response.css('meta[property~=\"og:url\"]::attr(content)').get()\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('span[itemprop~=price]::attr(content)').get()\n\n # quantity: « 100 graines * » OR « 5 grammes * »\n item['raw_string'] = response.css('#group_5 option[selected]::text').get()\n\n\n return item", "def get_product_details(self):\n\n db.execute(\"SELECT * FROM Product WHERE id = %s\", (self.id,))\n product = db.fetch()\n\n self.name = product[1]\n self.brand = product[2]\n self.nutriscore_id = product[3]\n self.store = product[4]\n self.description = product[5]\n self.url = product[6]", "def _parse_single_product(self, response):\n return self.parse_product(response)", "def parse_main(self, response):\n\n for i in response.xpath('//div[contains(@class,\"products-list__item\")]'):\n item = {\n \"VENDORID\": 1055,\n \"VENDOR\": 'JC SALES',\n \"ITEMNO\": i.xpath('.//span[contains(text(),\"Item No:\")]/text()').get().replace('Item No:', '').strip(),\n \"DESCRIPTION\": i.xpath('.//div[contains(@class,\"product-card__name\")]//a/text()').get(),\n \"IMAGE_URL\": i.xpath('.//div[contains(@class,\"product-card__image\")]//img[1]/@src').get(),\n \"PAGE_TITLE\": response.css('title::text').get(),\n \"PAGE_URL\": response.request.url\n }\n yield Request(response.urljoin(i.xpath('.//a[contains(@class,\"image__body\")]/@href').get()),\n self.parse_details, meta={'item': item})\n\n next_page = response.xpath('//a[text()=\">\"]/@href').get()\n if next_page is not None:\n next_page = 
response.urljoin(next_page)\n yield scrapy.Request(next_page, callback=self.parse_main)", "def parse_product(self, resp):\n loader = ItemLoader(item=EstateProperty(), response=resp)\n loader.add_value(\"url\", resp.request.url)\n\n # for the standard fields, extraction is straight forward\n for field, xpath in list(self.standard_fields.items()):\n loader.add_xpath(field, xpath)\n\n # exclude items where price is blank\n # may correspond to rentals\n price = resp.xpath(self.standard_fields['price']).extract_first()\n if price is None or price.strip()==\"\":\n # mark the item as dirty\n # to avoid sending it\n loader.add_value('is_dirty', True)\n\n # some items' titles are stored in a legacy path\n title = resp.xpath(self.standard_fields['title']).extract_first()\n if title is None or title.strip()==\"\":\n # try another way\n title = resp.xpath(self.special_fields['title_legacy']).extract_first()\n if title is None or title.strip()==\"\":\n # mark it dirty\n loader.add_value('is_dirty', True)\n else:\n loader.add_value('title', title)\n\n # sku is preprended by dirty text\n sku_dirty = resp.xpath(self.special_fields['sku']).extract_first()\n try:\n m = re.search(r'\\s{0,}\\S{3}\\s{1,}(?P<ref>.+)\\s{0,}', sku_dirty)\n loader.add_value('sku', m.group('ref'))\n except Exception as e:\n self.logger.error(e)\n loader.add_value('is_dirty', True)\n\n area_dirty = resp.xpath(self.special_fields['area']).extract_first()\n try:\n m = re.search(r'(?P<area>\\d+)\\sm.+', area_dirty)\n float_area = float(m.group('area'))\n loader.add_value('area', float_area)\n except Exception as e:\n self.logger.error(e)\n # parsing error on area is not a cause of dirty item\n\n yield loader.load_item()", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n \n item['promotion'] = not not response.css('.summary .price del')\n if item['promotion']:\n item['old_price'] = response.css('.summary .price del .amount::text').get()\n item['price'] = response.css('.summary .price ins .amount::text').get()\n else:\n item['price'] = response.css('.summary .price .amount::text').get()\n\n item['available'] = not not response.css('.in-stock')\n # e.g. « 10 en stock »\n item['stock'] = response.css('.in-stock::text').get()\n\n # weight\n item['raw_string'] = response.css(\n '.woocommerce-product-details__short-description p::text'\n ).get()\n \n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.current-price span::attr(content)').get()\n item['available'] = not not response.css('span.font-extra .product-available').get()\n item['promotion'] = not not response.css('.product-discount').get()\n\n\n # weight e.g. : « 0,25 g. 
NT »\n item['raw_string'] = response.css('div[itemprop~=description]::text').get()\n\n return item", "def parse_item_page_info(self, id, body):\n info = {}\n info['title'] = self.__re_search(body, *self.regx['title'])\n if info['title'] == 'Suggested Products':\n return None\n info['model'] = self.__re_search(body, *self.regx['model'])\n if self.__re_search(body, *self.regx['deactivated']):\n info['deactivated'] = True\n return info\n free_shipping = self.__re_search(body, *self.regx['free_shipping'])\n cart = self.__re_search(body, *self.regx['cart'])\n if free_shipping and not cart:\n info.update(self.parse_item_page_price(id, body))\n return info", "def test_product_detail(self):\n # first performing create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performing detail\n self._detail_model(\"product\", self.product_data, id, [\"name\", \"description\", \"image_link\", \"price\"])\n \n self.assertIsNotNone(id)", "def retrieve_product_infos(self):\n\n # PRODUCT NAME\n try:\n product_name = self.product['product_name'].capitalize()\n except KeyError:\n product_name = None\n\n # PRODUCT CODE\n try:\n product_code = self.product['code'].capitalize()\n except KeyError:\n product_code = None\n\n # URL\n try:\n product_url = self.product['url'].lower()\n except KeyError:\n product_url = None\n\n # IMAGE URL\n try:\n image_url = self.product['image_url'].lower()\n except KeyError:\n image_url = None\n\n # QUANTITY\n try:\n quantity = self.product['quantity'].capitalize()\n except KeyError:\n quantity = None\n\n # INGREDIENTS\n try:\n ingredients = self.product['ingredients_text_fr'].capitalize()\n except KeyError:\n ingredients = None\n\n # BRAND\n brands = []\n try:\n for brand in self.product['brands'].split(','):\n brand = brand.strip().capitalize()\n if (\n brand != ''\n and brand not in brands\n ):\n brands.append(brand)\n except KeyError:\n pass\n\n # STORES\n stores = []\n try:\n for store in self.product['stores'].split(','):\n store = store.strip().capitalize()\n if (\n store != ''\n and store not in stores\n ):\n stores.append(store)\n except KeyError:\n pass\n\n # COUNTRY\n try:\n countries = self.product['countries'].capitalize()\n except KeyError:\n countries = None\n if 'France' in countries:\n countries = 'France'\n else:\n countries = None\n\n # COMPARE TO CATEGORY\n try:\n compare_to = self.product['compared_to_category'].capitalize().split(':')[1]\n except KeyError:\n compare_to = None\n try:\n Categories.objects.get(\n name=compare_to\n )\n except Categories.DoesNotExist:\n compare_to = None\n except:\n importable = False\n\n # CATEGORIES HIERARCHY\n try:\n categories_hierarchy = [\n category.split(':')[1] for category in self.product['categories_hierarchy']\n ]\n except KeyError:\n categories_hierarchy = None\n\n # NUTRISCORE GRADE\n nutriscore_labels = [\n 'nutrition_grade_fr',\n 'nutriscore_grade'\n ]\n nutriscore = 'F'\n i = 0\n while (\n i < len(nutriscore_labels)\n and nutriscore == 'F'\n ):\n try:\n nutriscore = self.product[nutriscore_labels[i]].upper()\n except KeyError:\n i += 1\n\n product_infos = {\n 'product_name': product_name,\n 'product_code': product_code,\n 'product_url': product_url,\n 'image_url': image_url,\n 'quantity': quantity,\n 'ingredients': ingredients,\n 'brands': brands,\n 'stores': stores,\n 'countries': countries,\n 'compare_to': compare_to,\n 'categories_hierarchy': categories_hierarchy,\n 'nutriscore': nutriscore\n }\n\n nutriments = 
self.product['nutriments']\n for nutriment in self.list_nutriments:\n try:\n product_infos[nutriment] = float(nutriments[nutriment])\n except KeyError:\n product_infos[nutriment] = 0\n\n return product_infos", "def item_details(request, product_id):\n\n item = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': item,\n }\n\n return render(request, 'products/item_details.html', context)", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['price'] = response.css('.summary .price bdi::text').get()\n\n item['available'] = not not response.css('.in-stock')\n # e.g. « 10 en stock »\n item['stock'] = response.css('.stock::text').get()\n\n # quantity, e.g. « 15 graines »\n raw_string = response.css(\n '.woocommerce-product-details__short-description p::text'\n ).getall()\n raw_string += response.css('#tab-description td::text').getall()\n item['raw_string'] = ' '.join(raw_string)\n\n return item", "def product_details(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n product_creator = products.prod_creator_id\n return render(request, 'productdetails.html',\n {'products': products, 'pk': pk,\n 'product_creator': product_creator})", "def parse(self, response):\n product_urls = response.css('.product-details > a::attr(href)').getall()\n for product_url in product_urls:\n yield scrapy.Request(response.urljoin(product_url), self.parse_product)\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url is not None:\n yield scrapy.Request(response.urljoin(next_page_url))", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('h1::text').get()\n item['description'] = response.css('.partiesouligne p::text').get()\n # densité possible\n\n # sous la forme `0.8 g - 3.40 €`\n item['raw_string'] = response.css('.prixsachet::text').get()\n item['price'] = -1\n\n return item", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n item['product_name'] = response.css('.product_title::text').get()\n item['price'] = response.css('.product-information .price bdi::text').get()\n item['currency'] = 'EUR'\n item['description'] = response.css('.woocommerce-product-details__short-description p::text').get()\n\n item['published'] = response.css('meta[property~=\"article:published_time\"]::attr(content)').get() # '2016-03-25T11:15:56Z'\n item['last_modified'] = response.css('meta[property~=\"article:modified_time\"]::attr(content)').get()\n return item", "def _getParentPage(self):\n try:\n page={}\n res=self._getHTML(self.currenturi) # Assuming self.currenturi is at the product page\n self.rawpage=res['result']\n self._setCurrentPage()\n try:\n page['title']= self.parent_page_title\n except:\n page['title']=''\n log.exception(self.log_msg(\"Error occured while fetching product page title\"))\n\n page['data']=''\n try:\n page['ef_product_rating_overall']= float(stripHtml(self.soup.find(text='Overall Rating').parent.parent.parent.\n parent.renderContents()).splitlines()[-1].strip())\n except:\n log.info(self.log_msg(\"Error occured while fetching product page overall rating\"))\n\n try:\n page['ei_product_review_count']= int(self.soup.find(text=re.compile('Total Reviews:')).split(':')[-1].strip())\n except:\n 
log.info(self.log_msg(\"Error occured while fetching number of reviews for the product\"))\n\n try:\n if self.__product_price:\n page['et_product_price']= self.__product_price\n self.updateProductPrice(page.get('et_product_price'))\n except:\n log.info(self.log_msg(\"Error occured while fetching product price\"))\n \n log.debug(self.log_msg('got the content of the product main page'))\n log.debug(self.log_msg('checking session info'))\n try:\n post_hash = md5.md5(''.join(sorted(map(lambda x: str(x) if isinstance(x,(int,float)) else x , \\\n page.values()))).encode('utf-8','ignore')).hexdigest()\n except:\n log.exception(self.log_msg(\"Exception occured while creating post hash for the page %s, returning\" %self.currenturi))\n return False\n if not checkSessionInfo(self.genre, self.session_info_out, \n self.task.instance_data['uri'], self.task.instance_data.get('update')):\n id=None\n if self.session_info_out=={}:\n id=self.task.id\n log.debug(id)\n result=updateSessionInfo(self.genre, self.session_info_out, self.task.instance_data['uri'], post_hash, \n 'Post', self.task.instance_data.get('update'), Id=id)\n if result['updated']:\n page['parent_path'] = []\n page['path'] = [self.task.instance_data['uri']]\n page['task_log_id']=self.task.id\n page['versioned']=self.task.instance_data.get('versioned',False)\n page['category']=self.task.instance_data.get('category','generic')\n page['last_updated_time']= datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\") #Now\n page['client_name']=self.task.client_name\n page['entity']='post'\n page['uri'] = normalize(self.currenturi)\n page['uri_domain'] = unicode(urlparse(self.currenturi)[1])\n page['priority']=self.task.priority\n page['level']=self.task.level\n page['pickup_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\") #Now\n page['posted_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\") # As the Product page does not have a posting date, keeping the posting date identical to pickup date\n page['connector_instance_log_id'] = self.task.connector_instance_log_id\n page['connector_instance_id'] = self.task.connector_instance_id\n page['workspace_id'] = self.task.workspace_id\n page['client_id'] = self.task.client_id # TODO: Get the client from the project\n self.pages.append(page)\n log.debug(self.log_msg(\"product review main page details stored\"))\n return True\n else:\n log.debug(self.log_msg(\"product review main page details NOT stored\"))\n return False\n else:\n log.debug(self.log_msg(\"product review main page details NOT stored\"))\n return False\n except:\n log.exception(self.log_msg(\"Exception occured in _getParentPage()\"))\n return False #Not raised deliberately", "def parse_product(self, response):\n item = ProductItem()\n item['url'] = response.url\n item['vendor'] = parse_url(response.url).netloc\n\n \n data = response.css('script[type~=\"application/ld+json\"]::text').get()\n # Removes indent\n data = re.sub(r'[\\n\\t]', '', data)\n data = eval(data)\n \n item['product_name'] = data['name']\n item['price'] = data['offers']['price']\n item['currency'] = data['offers']['priceCurrency']\n item['product_id'] = data['productID']\n \n # seed number\n item['raw_string'] = response.css('.seed-number::text').get().strip()\n return item", "def test_get_product_detail(self):\n\n response = self.client.get(reverse('website:product_details', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Product title appears in HTML response content\n 
self.assertIn('<h1>Test Product</h1>'.encode(), response.content)\n self.assertNotIn('<h1>Test Product2</h1>'.encode(), response.content)" ]
[ "0.7332074", "0.70028687", "0.682712", "0.66167986", "0.655458", "0.6486167", "0.6345658", "0.6335755", "0.6321414", "0.6278564", "0.627526", "0.6249362", "0.6188311", "0.6187878", "0.6165495", "0.6099834", "0.60969275", "0.6072641", "0.60128343", "0.60072505", "0.5989429", "0.59727764", "0.5960655", "0.59466004", "0.5941034", "0.5935654", "0.5932243", "0.5913964", "0.5903703", "0.5883223" ]
0.79251844
0
Convert local time to stock time.
def __get_stock_time(stock_tz: timezone) -> datetime: return datetime.now().astimezone(stock_tz)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timestampfromlocal(local):\n return local.timestamp()", "def local_time():\n return datetime.datetime.now().isoformat()[:len('2017-01-24T10:44:00')]", "def local_time(self) -> SmartNvmeLocalTime:\n return self._local_time", "def local_time(self, local_time: SmartNvmeLocalTime):\n\n self._local_time = local_time", "def localize_time(self, apitime):\n return self.feedzone.localize(apitime).astimezone(self.localzone)", "def to_local_time(self, tweet_timestamp):\n timestamp = mktime_tz(parsedate_tz(tweet_timestamp))\n return datetime.fromtimestamp(timestamp)", "def UTCtimestampTolocal(ts, local_tz):\n t_utc = dt.datetime.fromtimestamp(ts,tz=pytz.timezone(\"UTC\"))\n t_local = t_utc.astimezone(local_tz)\n return t_local", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def utc_to_local(t):\n # set utc tzinfo\n t = t.replace(tzinfo=tz.tzutc())\n # convert to local timezone\n return t.astimezone(tz.tzlocal())", "def utc_to_local_datetime(dt):\n return dt.astimezone(LOCAL)", "def get_local(utc_time, tz):\n utc_tz = pytz.utc\n utc_now = datetime.utcnow().replace(tzinfo=utc_tz)\n utc_alarm = utc_now.replace(hour=utc_time.hour, minute=utc_time.minute)\n local_tz = pytz.timezone(tz)\n local_alarm = local_tz.normalize(utc_alarm)\n return local_alarm.time()", "def apply_timezone_datetime(_local_tz: str, _time: datetime.time):\n return pytz.timezone(_local_tz).localize(\n datetime.datetime.combine(datetime.datetime.now().date(), _time)\n )", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def now(time):\n a = datetime.fromtimestamp(time).strftime('%Y-%m-%d %H:%M:%S')\n return a", "def timestamp_to_local(timestamp):\n naive = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=pytz.utc)\n return naive.astimezone(_tz)", "def date_to_local(self, date):\n if date.tzinfo is not None:\n # date is timezone-aware\n date = date.astimezone(self.tz_local)\n\n else:\n # date is a naive date: assume expressed in UTC\n date = date.replace(tzinfo=self.tz_utc)\n # and converted to local time\n date = date.astimezone(self.tz_local)\n\n return date", "def ntp_to_system_time(date):\n return date - NTP_DELTA", "def gps2Time(self):\n self.posting_date = Time(self.posting_gpstime, format=\"gps\")", "def _get_time(self): \n\t\t# need to variable-ize the version ??? 
\n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)", "def get_datetime(ts, local_time=True):\n tsf = float(ts) / 1000\n timev = time.localtime(tsf) if local_time else time.gmtime(tsf)\n dt = datetime.datetime.fromtimestamp(time.mktime(timev))\n return dt", "def utc_to_local_timestamp(ts, orig_tz=UTC):\n timestamp = datetime.datetime.fromtimestamp(ts,tz=orig_tz)\n return timestamp.astimezone(LOCAL)", "def _time_str(self):\n try:\n if not self._time:\n raise ValueError\n format_ = '%a, %d %b %Y %H:%M:%S'\n return datetime.fromtimestamp(float(self._time)).strftime(format_)\n except ValueError:\n return plastic_date()", "def convert_to_localtime(dt):\n tz = pytz.timezone('Europe/Stockholm')\n dt = dt.replace(tzinfo=pytz.utc)\n dt = dt.astimezone(tz)\n return dt", "def _get_time(self):\n # get the current time in UTC (make sure we are timezone aware)\n now_utc = datetime.datetime.now(pytz.UTC)\n \n # convert to our local timezone\n timenow = now_utc.astimezone(self.timezone)\n \n # save the data to our data\n self.data['year'][0] = timenow.year\n self.data['month'][0] = timenow.month\n self.data['day'][0] = timenow.day\n self.data['hour'][0] = timenow.hour\n self.data['minute'][0] = timenow.minute\n self.data['second'][0] = timenow.second\n \n return", "def convertToLocalTime(zone, when=None):\n if when is None:\n when = datetime.today()\n\n offset = getTimeZoneOffset(zone, when)\n if offset >= 0:\n when = when - timedelta(hours=offset)\n else:\n when = when + timedelta(hours=offset)\n return when", "def SendLocalTime( self, ms_time = None ): \r\n\r\n if ms_time is None : \r\n ms_time = ms_localtime()\r\n\r\n message = self._fmt.pack( 'T', ms_time ) \r\n\r\n ## # debug \r\n ## print message, struct.unpack('=L', message[1:]) \r\n\r\n self._socket.write( message ) \r\n\r\n return self.GetServerResponse()", "def localfromtimestamp(timestamp):\n utctime = datetime.utcfromtimestamp(timestamp)\n offset = datetime.fromtimestamp(timestamp) - utctime\n return utctime + offset", "def utc2local(utc_dt):\n return datetime.fromtimestamp(timegm(utc_dt.timetuple()))", "def localized_date_time(timestring):\n date_time = arrow.get(timestring)\n local_date = date_time.strftime(xbmc.getRegion(\"dateshort\"))\n local_time = date_time.strftime(xbmc.getRegion(\"time\").replace(\":%S\", \"\"))\n return local_date, local_time", "def local_datetime(rdd_tuple):\n timezone = rdd_tuple[0].split(\" ; \")[2]\n utc_time = rdd_tuple[2]\n city_key = rdd_tuple[0][:-(len(timezone) + 3)]\n temperature = rdd_tuple[1]\n local_dt = utils.locutils.convert_timezone(utc_time, timezone)\n return city_key, temperature, local_dt" ]
[ "0.6390957", "0.62333965", "0.61749357", "0.61194736", "0.6035789", "0.60283273", "0.5905837", "0.58786947", "0.58271265", "0.57655567", "0.5695081", "0.56771725", "0.5636829", "0.5636829", "0.5608826", "0.5582738", "0.5571458", "0.5569241", "0.5568407", "0.5536695", "0.5466117", "0.5435753", "0.54344106", "0.5403808", "0.53891957", "0.53842926", "0.5379149", "0.5368219", "0.53637046", "0.53421926" ]
0.6539263
0
Get the difference between the stock price of yesterday and the day before yesterday. Information is obtained from the Alpha Vantage API.
def get_stock_difference(stock_symbol: str) -> float: av_params = { "function": "TIME_SERIES_DAILY", "symbol": stock_symbol, "apikey": config.AV_API_KEY } response = requests.get("https://www.alphavantage.co/query", params=av_params) response.raise_for_status() stock_daily_data = response.json() stock_timezone = timezone(stock_daily_data["Meta Data"]["5. Time Zone"]) print(stock_daily_data) stock_t = __get_stock_time(stock_timezone) yesterday_stock_t = __get_date_days_shift(stock_t, 1) two_days_ago_stock_t = __get_date_days_shift(stock_t, 2) yesterday_close = float( stock_daily_data["Time Series (Daily)"][yesterday_stock_t.strftime("%Y-%m-%d")]["4. close"] ) two_days_ago_close = float( stock_daily_data["Time Series (Daily)"][two_days_ago_stock_t.strftime("%Y-%m-%d")]["4. close"] ) different = round(yesterday_close - two_days_ago_close, 2) return round(different * 100 / yesterday_close, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_power_consumption_yesterday(self):\n return self.power_consumption_yesterday", "def previous_date(self):\n yesterday = pendulum.yesterday('UTC')\n last_update = self.storage.last_update(self.feed)\n if not last_update or last_update < yesterday:\n last_update = yesterday\n return last_update", "def price_diff(self):\n try:\n return(self.price_close - self.price_open)\n except:\n return", "def yesterday():\n return datetime.today() - timedelta(1)", "def getFullPriceHistory(self, stockSymbol, stockExchange):\n response = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}:{}&outputsize=full&apikey={}\".format(\n stockExchange, stockSymbol, self.ALPHA_VANTAGE_SECRET_KEY))\n data = response.json()\n timestamps, aClose = [], []\n for key in data['Time Series (Daily)']:\n timestamps.append(key)\n dates = [datetime.strptime(\n ts, \"%Y-%m-%d\") for ts in timestamps]\n dates.sort()\n dates.reverse()\n Dates = [datetime.strftime(ts, \"%Y-%m-%d\") for ts in dates]\n for date in Dates:\n aClose.append(\n float(data['Time Series (Daily)'][date]['5. adjusted close']))\n return (Dates, aClose)", "def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))", "def getCompactPriceHistory(self, stockSymbol, stockExchange):\n response = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}:{}&apikey={}\".format(\n stockExchange, stockSymbol, self.ALPHA_VANTAGE_SECRET_KEY))\n data = response.json()\n timestamps, aClose = [], []\n\n for key in data['Time Series (Daily)']:\n timestamps.append(key)\n dates = [datetime.strptime(\n ts, \"%Y-%m-%d\") for ts in timestamps]\n dates.sort()\n dates.reverse()\n Dates = [datetime.strftime(ts, \"%Y-%m-%d\") for ts in dates]\n for date in Dates:\n aClose.append(\n float(data['Time Series (Daily)'][date]['5. adjusted close']))\n return (Dates, aClose)", "def getDatePrice(self):\n return self.getHistorical().ix[:,[0,5]]", "def price_diff_d(self):\n try:\n return(self.direction*(self.price_close - self.price_open))\n except:\n return", "def yesterday(self):\r\n return RecordsYesterday(self)", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. 
Symbol'])\n counter += 1\n\n return info, symbols", "def history(self, t_minus=0):\n data = self.ohlcv_df[self.ohlcv_df.index <= utc_to_epoch(\n self.prior_time)]\n return OHLCVData(data[-t_minus:])", "def get_stock_price(stock):\n pass", "def price_diff_rel_d(self): \n try:\n return(self.price_diff_d / self.price_open)\n except:\n return", "def getClosingPrice(self):\t\n\t\treturn self.dataPoints[-1].getDate(), self.dataPoints[-1].getAdjustedValue()", "def getDateVolume(self):\n return self.getHistorical().ix[:,[0,6]]", "def get_daily_vol(close, lookback=100):\n\n # daily vol re-indexed to close\n df0 = close.index.searchsorted(close.index - pd.Timedelta(days=1))\n df0 = df0[df0 > 0]\n df0 = (pd.Series(close.index[df0 - 1], index=close.index[close.shape[0] - df0.shape[0]:]))\n\n df0 = close.loc[df0.index] / close.loc[df0.values].values - 1 # daily returns\n df0 = df0.ewm(span=lookback).std()\n return df0", "def exchange_value(self,stockvalue):\n if self.call:\n return max(0, stockvalue - self.strike)\n else:\n return max(0, self.strike - stockvalue)", "def price_diff_rel(self): \n try:\n return(self.price_diff / self.price_open)\n except:\n return", "def get_yesterday(x: Optional[Date] = None) -> Date:\n return (x or get_today()) - TimeDelta(days=1)", "def get_max_increase_from_yesterday():\n start = datetime.now() - timedelta(2)\n end = datetime.now()\n f = open(\"nasdaqtraded.txt\")\n stock_symbols, nasdaq_stock_symbols = extract_symbols(f)\n\n result = {}\n\n for i in range(len(nasdaq_stock_symbols)):\n symbol = nasdaq_stock_symbols[i]\n if isinstance(symbol, str):\n if not symbol.isalpha():\n continue\n else:\n continue\n\n try:\n df = get_data(start, end, symbol)\n except:\n continue\n\n if len(df['Close'].tolist()) != 2:\n continue\n yes, td = df['Close'].tolist()[:]\n\n inc = (td - yes) / yes\n if len(result) < 100:\n result[symbol] = inc\n else:\n min = list(result.keys())[0]\n for key in result:\n if result[key] < result[min]:\n min = key\n if result[min] > inc:\n result.pop(min)\n result[symbol] = inc\n print(result)\n return result", "def test_date_accept_yesterday(self):\n import dateutil.relativedelta\n spi_search = \"find date yesterday\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()+dateutil.relativedelta.relativedelta(days=-1), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def previous(self):\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index == 0:\n return None\n return posts_by_date[index - 1]", "def get_yesterday() -> tuple:\n logging.debug(\"Starting get_yesterday function.\")\n today = datetime.now(pytz.timezone(\"America/New_York\"))\n yesterday = (today - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n yesterday_split = yesterday.split(\"-\")\n year = yesterday_split[0]\n month = yesterday_split[1]\n day = yesterday_split[2]\n\n return year, month, day", "def yesterdayDate(self):\n yesterday = time.time() - 24*3600\n return time.strftime(\"%m/%d/%Y\", time.localtime(yesterday))", "def getLatestPercentageChange(self, stockSymbol, stockExchange, currentDate):\n Dates, Prices = self.getCompactPriceHistory(\n stockSymbol, stockExchange)\n if (Dates[0] == currentDate):\n latestPercentageChange = self.calculateCurrentPercentageChange(\n Prices)\n return latestPercentageChange\n else:\n return None", "def stock_disponible(self):\n return self.stock - self.stock_reservado", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def 
low_stock_date(self):\n return self._low_stock_date", "def delta(self) -> timedelta:\n delta = self.data.get(\"delta\", 0)\n return timedelta(seconds=delta)" ]
[ "0.68710786", "0.6384263", "0.6358718", "0.6214671", "0.60386467", "0.6026378", "0.60053504", "0.58787036", "0.5804829", "0.574149", "0.5725666", "0.5675212", "0.5655568", "0.5632252", "0.56037617", "0.55869097", "0.5582911", "0.55560523", "0.5548148", "0.55467075", "0.5543996", "0.5439815", "0.54268926", "0.54132855", "0.54037637", "0.53673947", "0.5342685", "0.5331937", "0.5329307", "0.53189915" ]
0.7740488
0
Invert the bits in the bytestring `s`. This is used to achieve a descending order for blobs and strings when they are part of a compound key, however when they are stored as a 1tuple, it
def invert(s): return s.translate(INVERT_TBL)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reverseString1(self, s):\n for i in range(len(s)//2):\n s[i], s[~i] = s[~i], s[i]", "def reverseComplement(s):\n\tcomplement = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'N':'N'}\n\tt = ''\n\tfor base in s:\n\t\tt = complement[base] + t\n\treturn t", "def reverseString(self, s):\n for i in range(len(s)//2):\n s[i], s[-(i+1)] = s[-(i+1)], s[i]", "def reverseString(self, s: List[str]) -> None:\n size = len(s)\n for i in range(size//2):\n s[i], s[~i] = s[~i], s[i]\n # s[i], s[size-i-1] = s[size-i-1], s[i]\n\n # s[:] = s[::-1]", "def inv_sub_bytes(state):\n sub_bytes(state, s_box=inv_s_box)", "def binary_reversal(string):\n\tbinary = bin(int(string))\n\tnew_binary = binary.replace('b', '0')\n\tnew_binary = new_binary[::-1]\n\t\n\tdec = int(new_binary, 2)\n\tprint dec", "def compress_seq(s: str):\n bits = 64\n assert len(s) <= (bits / 2 - 1)\n result = 0\n for nuc in s:\n if nuc not in NUCS_INVERSE:\n return 1 << (bits - 1)\n result = result << 2\n result = result | NUCS_INVERSE[nuc]\n return result", "def crypto_core_ed25519_scalar_invert(s: bytes) -> bytes:\n ensure(\n has_crypto_core_ed25519,\n \"Not available in minimal build\",\n raising=exc.UnavailableError,\n )\n\n ensure(\n isinstance(s, bytes) and len(s) == crypto_core_ed25519_SCALARBYTES,\n \"Integer s must be a {} long bytes sequence\".format(\n \"crypto_core_ed25519_SCALARBYTES\"\n ),\n raising=exc.TypeError,\n )\n\n r = ffi.new(\"unsigned char[]\", crypto_core_ed25519_SCALARBYTES)\n\n rc = lib.crypto_core_ed25519_scalar_invert(r, s)\n ensure(rc == 0, \"Unexpected library error\", raising=exc.RuntimeError)\n\n return ffi.buffer(r, crypto_core_ed25519_SCALARBYTES)[:]", "def decipher_raw(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n e = [decrypt(u[i], u[i + 1], key) for i in range(len(u))[::2]]\n return b''.join([struct.pack('2I', ee, ef) for ee, ef in e])", "def reverse_byte_order_negative(result):\n bit_string = \"\"\n\n # Slice off the negative sign\n result = result[1:len(result) + 1]\n\n while len(result) > 0:\n cur_nybble = result[0:2]\n bit_string = cur_nybble + bit_string\n result = result[3:len(result) + 1]\n\n if len(result) != 0:\n bit_string = \" \" + bit_string\n\n # Just need to prepend the negative sign now\n bit_string = \"-\" + bit_string\n return bit_string", "def reverseString(s):\n for i in range(len(s)//2):\n t = s[i]\n s[i] = s[len(s)-i-1]\n s[len(s)-i-1] = t", "def reverse(self, s):\n return '\\x16%s\\x16' % s", "def find_flipped_bit(s1, s2):\n if len(s1) == 0 or len(s2) == 0:\n raise ValueError(\"Empty string inputted.\")\n\n if len(s1) != len(s2):\n raise ValueError(\"Strings compared in gray code must have the same length.\")\n \n if any([x != \"0\" and x != \"1\" for x in s1]) or any([x != \"0\" and x != \"1\" for x in s2]):\n raise ValueError(f\"One of inputs {s1}, {s2} is not a valid binary string.\")\n \n # Sum the strings elementwise modulo 2; the sum will be 1 only in the slot \n # where we flipped a bit \n string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))]\n\n if string_sums.count(1) == 0:\n raise ValueError(f\"Strings {s1} and {s2} are the same.\")\n elif string_sums.count(1) > 1:\n raise ValueError(f\"Strings {s1} and {s2} are not ordered in a gray code.\")\n\n return string_sums.index(1)", "def reverse_byte_order_positive(result):\n bit_string = \"\"\n while len(result) > 0:\n cur_nybble = result[0:2]\n bit_string = cur_nybble + bit_string\n result = result[3:len(result) + 1]\n\n if len(result) != 0:\n 
bit_string = \" \" + bit_string\n\n return bit_string", "def reverseString(self, s: list[str]) -> None:\n for index in range(len(s)//2):\n s[index], s[-1-index] = s[-1-index], s[index]", "def reverse_string(s):\n s.reverse()", "def string2bits(s=''):\n return [bin(ord(x))[2:].zfill(8) for x in s]", "def reverseComplement(string):\n rMap = { \"A\":\"T\", \"T\":\"A\", \"C\":\"G\", \"G\":\"C\", \"N\":\"N\"}\n return \"\".join(rMap[i] for i in string[::-1])", "def reverseString(self, s) -> None:\n i = 0\n j = len(s) - 1\n while i < j:\n temp = s[i]\n s[i] = s[j]\n s[j] = temp\n i += 1\n j -= 1", "def revComp(s):\n d = {\"A\": \"T\", \"C\": \"G\", \"G\": \"C\", \"T\": \"A\", \"N\": \"N\"}\n s = s[::-1]\n x = [d[c] for c in s]\n return \"\".join(x)", "def reverseString(self, s: list[str]) -> None:\n l, r = 0, len(s)-1\n while l < r:\n s[l], s[r] = s[r], s[l]\n l += 1\n r -= 1", "def mirror(s):\n mir_str = s\n for i in range(1, len(s) + 1):\n mir_str += s[-i]\n return mir_str", "def s2b (s):\n return s.encode()", "def invert_inplace(a):", "def reverseString(self, s) -> None:\n # n=len(s)\n # for i in range(int(n/2)):\n # s[i],s[n-1-i]=s[n-1-i],s[i]\n s=s[::-1]\n print(s)", "def convertLittleBig(string):\n t = bytearray.fromhex(string)\n t.reverse()\n return ''.join(format(x, '02x') for x in t)", "def invert_board_state(board_state):\n return tuple(tuple(-board_state[j][i] for i in range(len(board_state[0]))) for j in range(len(board_state)))", "def reverseString(self, s) -> None:\n # 방법 1\n s.reverse()\n # 방법 2\n # half_len = int(len(s) / 2)\n # for i in range(half_len):\n # temp = s[i]\n # s[i] = s[len(s) - 1 - i]\n # s[len(s) - 1 - i] = temp", "def decodebytes(s):\n\n decoded = decode(s)\n buf = bytearray()\n while decoded > 0:\n buf.append(decoded & 0xff)\n decoded //= 256\n buf.reverse()\n\n return bytes(buf)", "def reverseString(self, s: List[str]) -> None:\n front = 0\n back = len(s) - 1\n\n while front < back:\n s[front], s[back] = s[back], s[front]\n front += 1\n back -= 1" ]
[ "0.66461504", "0.6160725", "0.6132901", "0.60731655", "0.5983622", "0.594542", "0.59285384", "0.59088534", "0.5840628", "0.5825385", "0.5814925", "0.57725865", "0.5744192", "0.5703233", "0.56617624", "0.56421524", "0.5639878", "0.5639233", "0.5598104", "0.55823743", "0.55560523", "0.5551096", "0.5551013", "0.5538903", "0.5516356", "0.550998", "0.5480473", "0.54784244", "0.5468352", "0.5439247" ]
0.66995716
0
Given a bytestring `s`, return the most compact bytestring that is greater than any value prefixed with `s`, but lower than any other value.
def next_greater(s): assert s # Based on the Plyvel `bytes_increment()` function. s2 = s.rstrip('\xff') return s2 and (s2[:-1] + chr(ord(s2[-1]) + 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minWindow(self, s: str, t: str) -> str:\n ct = Counter(t)\n right = left = 0\n length = inf\n res = \"\"\n needed_for_t = len(t)\n for right, ch in enumerate(s):\n if ch in ct:\n if ct[ch] > 0:\n needed_for_t -= 1\n ct[ch] -= 1\n while needed_for_t == 0:\n if right - left + 1 < length:\n length = right - left + 1\n res = s[left: right + 1]\n if s[left] in ct:\n ct[s[left]] += 1\n if ct[s[left]] > 0:\n needed_for_t += 1\n left += 1\n return res", "def minFlipsMonoIncr2(self, s: str) -> int:\n l = len(s)\n\n def add_str(a, b) -> int:\n return int(a) + int(b)\n\n # break_point_location for break line between s\n # like | 0 | 1 | 0 | 0 |\n # the first one is keep for\n bpl = list(itertools.accumulate(s, func=add_str, initial=0))\n return min([bpl[i] + (l - i - (bpl[-1] - bpl[i])) for i in range(l + 1)])", "def smaller(s):\n if len(s) == 1:\n return s\n if s[0] > s[-1]:\n return smaller(s[1:])\n return smaller(s[:-1])", "def smallest_substring_linear(S, _set):\n frequency = Counter()\n min_len = len(S)\n min_substr = None\n start = end = 0\n while end < len(S):\n # Expand until we start controlling\n # also maintain the frequency\n while len(frequency) != len(_set):\n if S[end] in _set:\n frequency[S[end]] += 1\n end += 1\n\n if end == len(S):\n break\n\n if end == len(S):\n break\n\n # Shrink from the left\n while start < len(S) and len(frequency) == len(_set):\n if S[start] in _set:\n frequency[S[start]] -= 1\n\n if frequency[S[start]] == 0:\n # we just stopped controlling\n frequency.pop(S[start])\n\n start += 1\n\n # keep track of smallest substring\n temp = S[start - 1:end]\n if len(temp) < min_len:\n min_len = len(temp)\n min_substr = temp\n\n return min_substr", "def minFlipsMonoIncr(self, s: str) -> int:\n\n def __minFMI(s, head: str) -> int:\n if len(s) == 1:\n # last one, no need to flip if head is 0 or s[0] same as head\n if s[0] == head or head == '0':\n return 0\n else:\n return 1\n if s[0] == head:\n # head and s[0] are same, no need to flip as we want minimum flips\n return __minFMI(s[1:], head)\n elif head == '0': # head = 0 and s[0] = 1, has two choices\n # first arg is flip s[0] from 1 to 0, last arg is keep s[0] as 1\n # and head changed to 1\n return min(__minFMI(s[1:], '0') + 1, __minFMI(s[1:], '1'))\n else:\n # head = 1 and s[0] = 0, and must flip s[0] to 1\n return __minFMI(s[1:], '1') + 1\n\n # assume the virtual head is 0\n return __minFMI(s, '0')", "def get_encoded_minhash(string: str) -> str:\n return encode_minhash(compute_minhash(string))", "def minhash_containment(s, t):\n return len(s & t) / len(s)", "def stringConstruction(s):\n p = ''\n i = 0\n mincost = 0\n while p != s:\n if s[i] in p:\n p = p + s[i]\n # no cost since it is substring of p\n else:\n p = p + s[i]\n mincost += 1\n i += 1\n return mincost", "def compress_seq(s: str):\n bits = 64\n assert len(s) <= (bits / 2 - 1)\n result = 0\n for nuc in s:\n if nuc not in NUCS_INVERSE:\n return 1 << (bits - 1)\n result = result << 2\n result = result | NUCS_INVERSE[nuc]\n return result", "def removeOneDigit(self, s:str, t:str) -> int:\n count = 0\n for i in range(len(s)):\n if s[:i] + s[i + 1:] > t:\n # print(s[:i] + s[i + 1:], t)\n count = count + 1\n for j in range(len(t)):\n if s > t[:j] + t[j + 1:]:\n # print(s, t[:j] + t[j + 1:])\n count = count + 1\n\n return count", "def find_extrema(s):\n max_env = np.logical_and(\n np.r_[True, s[1:] > s[:-1]],\n np.r_[s[:-1] > s[1:], True])\n min_env = np.logical_and(\n np.r_[True, s[1:] < s[:-1]],\n np.r_[s[:-1] < s[1:], True])\n max_env[0] = max_env[-1] = 
False\n\n #exclude endpoints\n mini = [m for m in min_env.nonzero()[0] if m != 0 and m != len(s)-1]\n maxi = [m for m in max_env.nonzero()[0] if m != 0 and m != len(s)-1]\n\n return mini,maxi", "def calc_min_cost(string: str) -> int:\n cost = 0\n\n if not string:\n return 0\n\n characters = set()\n\n for char in string:\n if char not in characters:\n cost += 1\n characters.add(char)\n\n return cost", "def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])", "def theLoveLetterMystery(s):\n mincount = 0\n for i in range(len(s) // 2):\n mincount += abs(ord(s[i]) - ord(s[-1 - i]))\n\n return mincount", "def get_next_smaller(self, lookup_string: str) -> Optional[SupportsStr]:\n ...", "def human2bytes(s):\n if s is None:\n return None\n try:\n return int(s)\n except ValueError:\n symbols = 'BKMGTPEZY'\n letter = s[-1:].strip().upper()\n num = float(s[:-1])\n prefix = {symbols[0]: 1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n return int(num * prefix[letter])", "def worst_csq_from_csq(csq):\n return rev_csq_order_dict[worst_csq_index(csq.split('&'))]", "def __call__(self, string):\n import jieba\n str_list = list(jieba.cut(string, cut_all = False))\n return self.tbl.most_likely(str_list)", "def get_max_character(strings):\n m=0\n for string in strings:\n for char in string:\n if char>m:\n m=char\n return m", "def fuzzify(s, u):\n f_s = s @ u.T\n m_s = np_max(f_s, axis=0)\n m_s = np.maximum(m_s, 0, m_s)\n return m_s", "def getSubstring(str):\n\tresultStr=[]\n\ttempStr=\"\"\n\tfor i in range(len(str)-1):\n\t\t#result[a]=a in result and result[a]+1 or 1\n\t\ttempStr=str[i]\n\t\tfor b in str[i+1:]:\n\t\t\tif b in tempStr:\n\t\t\t\tbreak\n\t\t\telse: tempStr+=b\n\n\t\tresultStr.append(tempStr)\n\treturn sorted(resultStr,key=lambda x:len(x),reverse=True)[0]", "def min_window(s, t):\n if len(s) == 0 or len(t) == 0:\n return ''\n elif len(t) == 1:\n if t[0] in s:\n return t\n else:\n return ''\n\n index_tuples = filtered_index_tuples(s, t)\n if len(index_tuples) < len(t):\n return ''\n\n count_dict = defaultdict(lambda: 0)\n target_dict = str_to_count_dict(t)\n start = 0\n end = 0\n count_dict[index_tuples[start][1]] += 1\n best = (-1, len(s)) # nonsensical\n while end < len(index_tuples): #or count_dict_covers_target(count_dict, t):\n covers_target = count_dict_covers_target(count_dict, target_dict)\n\n if covers_target:\n if index_tuples[end][0] - index_tuples[start][0] < best[1] - best[0]:\n best = (index_tuples[start][0], index_tuples[end][0])\n\n count_dict[index_tuples[start][1]] -= 1\n start += 1\n else:\n end += 1\n if end < len(index_tuples):\n count_dict[index_tuples[end][1]] += 1\n\n if best[0] == -1:\n return ''\n else:\n return s[best[0]:best[1] + 1]", "def human2bytes(s):\n init = s\n num = \"\"\n while s and s[0:1].isdigit() or s[0:1] == '.':\n num += s[0]\n s = s[1:]\n num = float(num)\n letter = s.strip()\n for name, sset in SYMBOLS.items():\n if letter in sset:\n break\n else:\n if letter == 'k':\n # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs\n sset = SYMBOLS['customary']\n letter = letter.upper()\n else:\n raise ValueError(\"can't interpret %r\" % init)\n prefix = {sset[0]: 1}\n for i, s in enumerate(sset[1:]):\n prefix[s] = 1 << (i + 1) * 10\n return int(num * prefix[letter])", "def alphabet_minimize(self):\n\n # We perform our normalization in a random order. This helps give\n # us a good mix of likely to succeed (e.g. 
rare bytes) vs likely\n # to have a large impact (e.g. common bytes) without having to\n # have any idea which bytes are which.\n all_bytes = list(hrange(256))\n self.random.shuffle(all_bytes)\n\n for c in all_bytes:\n buf = self.buffer\n\n if c not in buf:\n continue\n\n def can_replace_with(d):\n if d < 0:\n return False\n\n if self.consider_new_buffer(hbytes([d if b == c else b for b in buf])):\n if d <= 1:\n # For small values of d if this succeeds we take this\n # as evidence that it is worth doing a a bulk replacement\n # where we replace all values which are close\n # to c but smaller with d as well. This helps us substantially\n # in cases where we have a lot of \"dead\" bytes that don't really do\n # much, as it allows us to replace many of them in one go rather\n # than one at a time. An example of where this matters is\n # test_minimize_multiple_elements_in_silly_large_int_range_min_is_not_dupe\n # in test_shrink_quality.py\n def replace_range(k):\n if k > c:\n return False\n\n def should_replace_byte(b):\n return c - k <= b <= c and d < b\n\n return self.consider_new_buffer(\n hbytes(\n [d if should_replace_byte(b) else b for b in buf]\n )\n )\n\n find_integer(replace_range)\n return True\n\n if (\n # If we cannot replace the current byte with its predecessor,\n # assume it is already minimal and continue on. This ensures\n # we make no more than one call per distinct byte value in the\n # event that no shrinks are possible here.\n not can_replace_with(c - 1)\n # We next try replacing with 0 or 1. If this works then\n # there is nothing else to do here.\n or can_replace_with(0)\n or can_replace_with(1)\n # Finally we try to replace with c - 2 before going on to the\n # binary search so that in cases which were already nearly\n # minimal we don't do log(n) extra work.\n or not can_replace_with(c - 2)\n ):\n continue\n\n # Now binary search to find a small replacement.\n\n # Invariant: We cannot replace with lo, we can replace with hi.\n lo = 1\n hi = c - 2\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if can_replace_with(mid):\n hi = mid\n else:\n lo = mid", "def compress_v2(string):\n\n result = \"\"\n\n l = len(string)\n\n # Edge cases\n if l == 0:\n return \"\"\n\n if l == 1:\n return string + \"1\"\n\n last = string[0]\n count = 1\n i = 1\n\n while i < l:\n if string[i] == string[i-1]:\n count += 1\n else:\n result = result + string[i-1] + str(count)\n count = 1\n\n i += 1\n\n # For the last letter\n result = result + string[i-1] + str(count)\n\n return result", "def _parse_bytes(s):\n if isinstance(s, (int, float)):\n return int(s)\n s = s.replace(\" \", \"\")\n if not any(char.isdigit() for char in s):\n s = \"1\" + s\n\n for i in range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:]\n\n try:\n n = float(prefix)\n except ValueError as e:\n raise ValueError(\n \"Could not interpret '%s' as a number\" % prefix\n ) from e\n\n try:\n multiplier = BYTE_SIZES[suffix.lower()]\n except KeyError as e:\n raise ValueError(\n \"Could not interpret '%s' as a byte unit\" % suffix\n ) from e\n\n result = n * multiplier\n return int(result)", "def stringCompression(s):\n\n orig_len = len(s)\n t = []\n current_letter = s[0]\n count = 1\n\n for i in range(1, orig_len):\n if s[i] == current_letter:\n count += 1\n if i == orig_len - 1:\n t.append(current_letter + str(count))\n else:\n t.append(current_letter + str(count))\n current_letter = s[i]\n count = 1\n\n t = ''.join(t)\n return t if len(t) < orig_len else s\n\n # Time 
Complexity: O(len(s))\n # Space Complexity: O(len(s)), worst case is 2*len(s)", "def strcmp(s, t):\n len_s = len(s)\n len_t = len(t)\n for i in range(min(len_s, len_t)):\n if s[i] != t[i]:\n return ord(s[i]) - ord(t[i])\n return len_s - len_t", "def find_best_candidate(s_array):\n best_string = ''\n max_val = 0\n for s in s_array:\n score = compare(s)\n if score > max_val:\n max_val = score\n best_string = s\n return best_string", "def string_compression(w):\n if len(w) <= 1:\n return w\n\n substrings = []\n prev_char = w[0]\n char_count = 1\n for char in w[1:]:\n if prev_char == char:\n char_count += 1\n else:\n substrings.append('%s%s' % (prev_char, char_count))\n char_count = 1\n prev_char = char\n\n substrings.append('%s%s' % (prev_char, char_count))\n\n compression = ''.join(substrings)\n if len(compression) < len(w):\n return compression\n else:\n return w" ]
[ "0.6012119", "0.59964544", "0.59740466", "0.5591786", "0.5585872", "0.5546276", "0.5545449", "0.5528146", "0.55005383", "0.54773486", "0.5462731", "0.5455454", "0.5421001", "0.5377825", "0.53701454", "0.53483087", "0.5336918", "0.53265125", "0.5279389", "0.5211155", "0.51874715", "0.51840645", "0.5178955", "0.51585513", "0.5154111", "0.512258", "0.51019853", "0.5099482", "0.5033831", "0.5027026" ]
0.6833631
0
Return True if an entry with the exact tuple `x` exists in the index.
def has(self, x, txn=None): x = tuplize(x) tup, key = next(self.pairs(x), (None, None)) return tup == x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def __contains__(self, idx):\n return idx in self._data", "def contains(self, x):\n raise NotImplementedError", "def does_exist(self, index):\n if index in self.map:\n return True\n return False", "def contains(self, x):\n raise NotImplementedError()", "def __contains__(self, item):\n return item in self._index_map", "def has_index(self, index):\n return index in [s[0] for s in self.get_index_list()]", "def __contains__(self, key):\n return (key in self.index)", "def __contains__(self, x):\n return x in (v for v, _ in self)", "def __contains__(self, x):\n return x in (v for v, _ in self)", "def contains(self, x):\n for i in range(self.k):\n if self.bits[mmh3.hash(x,i) % self.m] == False:\n return False\n return True", "def __contains__(self, key):\n return key in self._index", "def check(self, k, x):\n k = self._checkIndex(k)\n return bool(self.caches[k].count(x))", "def pair_exists(key, value):\n # Initialize key variables\n result = False\n rows = []\n\n # Ignore certain restricted keys\n with db.db_query(20006) as session:\n rows = session.query(Pair.idx_pair).filter(and_(\n Pair.key == key.encode(),\n Pair.value == value.encode()\n ))\n\n # Return\n for row in rows:\n result = row.idx_pair\n break\n return result", "def __contains__(self, index):\r\n\r\n return index in self._contents", "def has(self, index):\n raise NotImplementedError()", "def contains_key_at(self, key, index):\r\n return index < self.num_keys() and self.keys[index] == key", "def __contains__(self, atom_idx):\n if isinstance(atom_idx, Atom):\n return self.atom_list.__contains__(atom_idx)\n elif isinstance(atom_idx, str):\n return self.atom_dict.__contains__(atom_idx)\n raise TypeError, atom_idx", "def __contains__(self, index):\n\n return index in self._contents", "def contains(self, x: object):\n return x in self.items", "def has_key(self,index):\n\t\ttry:\n\t\t\tself.__get(index)\n\t\t\treturn True\n\t\texcept:\n\t\t\treturn False", "def __contains__(self, record):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == record)\n\n return query.exists()", "def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index", "def contains(self, key):\n bus=key%100000\n pos=key//100000\n return self.li[bus][pos]==1", "def is_index(self, key):\n if key not in self:\n return False\n match = key.base.label if self[key].is_tensor else key\n for i in self.extract(key, readby=True):\n for e in retrieve_indexed(i):\n if any(match in idx.free_symbols for idx in e.indices):\n return True\n return False", "def contains(self, x):\n # need more to assure its a real SSP - ie on right torus\n return (len(x) == self._shape[0])", "def __contains__(self,x):\n dbg('_gradelist[..].__contains__(',x,')')\n nems = self._grades #assuming x is a Grade\n if not isinstance(x,Grade): \n #probably looking for a name!\n nems = [q.name for q in self._grades]\n dbg('_gradelist[..].__contains__.nems = ',*nems)\n for i in nems:\n if x==i:\n return True\n return False", "def contains(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n if not self.arr[val]:\n return False\n else:\n return True", "def contains_ring_idx(self, ring_idx):\n return ring_idx in self.rix", "def IndexExists(self, arg0: 'unsigned long long') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_IndexExists(self, arg0)" ]
[ "0.7002768", "0.69785744", "0.6783254", "0.6741981", "0.6699613", "0.6592085", "0.6581825", "0.65470314", "0.6491138", "0.6491138", "0.64818466", "0.64338917", "0.63576055", "0.6340422", "0.62632", "0.6249271", "0.6247083", "0.62031204", "0.61727875", "0.61569077", "0.6147019", "0.6144128", "0.613454", "0.6110444", "0.60558605", "0.6051246", "0.6006405", "0.59309065", "0.59283614", "0.59280246" ]
0.7292057
0
Yield `get(x)` for each `x` in the iterable `xs`.
def gets(self, xs, txn=None, rec=None, default=None): return (self.get(x, txn, rec, default) for x in xs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def itervalues(self, *args, **kwargs):\n for key in self.iterkeys():\n yield self._get(key, *args, **kwargs)", "def __iter__(self):\n\n # For each key in set of keys\n for key in self.keys_set:\n\n # Yield that key and associated value\n yield key, self.__getitem__(key)", "def __iter__(self):\n for b in self.x:\n yield b", "def items(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=False):\n for idx_key, key in self.pairs(args, lo, hi, reverse, max,\n include, txn):\n obj = self.coll.get(key, txn=txn, rec=rec)\n if obj:\n yield key, obj\n else:\n warnings.warn('stale entry in %r, requires rebuild' % (self,))", "def scanl(f, base, l):\n yield base\n for x in l:\n base = f(base, x)\n yield base", "def itervalues(self, key=None):\n if key != None:\n vals = self.get(key)\n if vals != None:\n for val in vals:\n yield val\n else:\n for key in self.iterkeys():\n vals = self.get(key)\n for val in vals:\n yield val", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def items(self):\n for ts in self:\n yield ts, self[ts]", "def values(self):\n for ts in self:\n yield self[ts]", "def itervalues(self):\n for key in self:\n yield self[key]", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def __iter__(self):\n for val in self.value:\n yield val", "def get_many(self, *keys):\n key_x_value = self.get_dict(*keys)\n return [key_x_value[key] for key in keys]", "def gets(self, keys, default=None, rec=False, txn=None):\n return (self.get(x, default, rec, txn) for k in keys)", "def mapg(f, C):\n for x in C:\n yield f(x)", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def __call__(self):\n for name in self:\n try:\n yield getattr(self, name)\n except AttributeError:\n raise KeyError(name)", "def _find(xs, predicate):\n for x in xs:\n if predicate(x):\n return x\n return None", "def selections(xs):\n for i, x in enumerate(xs):\n yield (x, xs[:i] + xs[i + 1:])", "def iterentries(self):\n for key in self.iterkeys():\n yield self.get(key)", "def values(self, args=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=None):\n return itertools.imap(ITEMGETTER_1,\n self.items(args, lo, hi, reverse, max, include, txn, rec))", "def __call__(self, iterable):\n if self._ordered:\n imap = self._distrubtor.imap\n else:\n imap = self._distrubtor.imap_unordered\n\n for result in imap(iterable):\n yield result", "def __iter__(self):\n\n for lit in self.fvals:\n yield lit", "def iteritems(self):\n current = self.first\n\n while current is not None:\n yield current.item\n current = current.next_node", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def get_i(iterable: Iterable, i: int):\n for item in iterable:\n yield item[i]", "def get_values(self, ckey):\n for next_key, item in yield_obj(self, ckey):\n if isdictinstance(item):\n for final, elem in yield_obj(item, next_key):\n if isdictinstance(elem) and elem.has_key(final):\n yield elem[final]\n else:\n yield elem\n elif isinstance(item, list) or isinstance(item, GeneratorType):\n for final, elem in item:\n for last, att in yield_obj(elem, final):\n if isdictinstance(att) and att.has_key(last):\n yield att[last]\n else:\n yield att", "def values(self, key=None, lo=None, hi=None, reverse=None, max=None,\n include=False, txn=None, rec=None):\n return itertools.imap(ITEMGETTER_1,\n self.items(key, lo, hi, reverse, max, include, txn, rec))", "def __iter__(self):\n for domain 
in self.keys():\n yield domain", "def itervalues(self):\n\n for i in xrange(0, self._limit):\n try:\n yield self[i]\n except KeyError:\n pass" ]
[ "0.58548295", "0.56954026", "0.55972797", "0.5475167", "0.5433384", "0.5421819", "0.5411479", "0.52709156", "0.52600944", "0.52473813", "0.5232616", "0.52298725", "0.5229124", "0.5228351", "0.5143503", "0.5112543", "0.51098645", "0.5067082", "0.5049542", "0.5047411", "0.50272244", "0.50069946", "0.49930698", "0.4984854", "0.49728015", "0.49624512", "0.49440393", "0.4905329", "0.48919412", "0.48908028" ]
0.63888204
0
Associate an index with the collection. Index metadata will be created in the storage engine if it does not exist. Returns the `Index` instance describing the index. This method may only be invoked once for each unique `name` for each collection.
def add_index(self, name, func): assert name not in self.indices info_name = 'index:%s:%s' % (self.info['name'], name) info = self.store._get_info(info_name, index_for=self.info['name']) index = Index(self, info, func) self.indices[name] = index if IndexKeyBuilder: self._index_keys = IndexKeyBuilder(self.indices.values()).build return index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_index(collection, index):\n db[collection].create_index(index)", "def init_index(self, index_name):\n return Index(self, index_name)", "def create_index(index_name, index_config, client):\n client.create(index=index_name, body=index_config)", "def create_index(index_name):\n resp = es.indices.create(index=index_name)\n print(resp)", "def create(\n self,\n index: IO,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def index(self, index):\n \"\"\"\n if index is None:\n raise ValueError(\"Invalid value for `index`, must not be `None`\")\n \"\"\"\n\n self.container['index'] = index", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name", "def create(\n self,\n index: _models.SearchIndex,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def test_create_index(self, collection):\n collection.create_index(\"hello\")\n assert collection._indexes == {\"_id_\": ((\"_id\",), {(1,)})}\n\n collection.create_index(\"hello\", unique=True)\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",)}),\n }", "def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()", "def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for {}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")", "def create_index(schema, index_name):\n if not os.path.exists(index_name):\n os.mkdir(index_name)\n ix = index.create_in(index_name, schema)\n print(f\"index {index_name} created successfully\")\n return ix", "def create_index():", "def create_index(self, index_name, body):\n if self.es.indices.exists(index_name):\n print(\"deleting '%s' index...\" % index_name)\n res = self.es.indices.delete(index=index_name)\n print(\" response: '%s'\" % res)\n\n print(\"creating '%s' index...\" % index_name)\n res = self.es.indices.create(index=index_name, body=body)\n print(\" response: '%s'\" % res)", "def get_index(self, name):\n for index in self.indexes:\n if index.name == name:\n return index\n return None", "def create_index(self):\n\n indice = client.IndicesClient(self.es)\n\n if not indice.exists(self.es_main_index):\n indice.create(\n index=self.es_main_index\n )\n\n return True", "def create_index(self, db_name):\n\t\tindex_func_path = self._get_index_func_filepath(db_name)\n\t\t\n\t\tif os.path.isfile(index_func_path):\n\t\t\t# create index request payload from predefined file\t\n\t\t\twith open(index_func_path, 'r') as content_file:\n\t\t\t\tpayload = content_file.read()\n\t\t\n\t\t\tprint (\"Create index using function in: {}\".format(index_func_path))\n\t\t\turl = \"https://{}/{}/_design/view\".format(\n\t\t\t\tself.cloudanthost, db_name)\n\t\t\tresponse = self.r.put(url, data=payload)\n\t\t\tassert response.status_code == 201", "def 
getAnalyzerIndex(self, name):\n\n self.ensureNotCreated()\n\n if not name in self.analyzers:\n raise Exception('Analyzer %r is not present in the framework configuration' % name)\n\n return self.analyzers.index(name)", "def create_or_update(\n self,\n index_name: str,\n prefer: Union[str, _models.Enum0],\n index: IO,\n allow_index_downtime: Optional[bool] = None,\n if_match: Optional[str] = None,\n if_none_match: Optional[str] = None,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def create(\n self,\n index: Union[_models.SearchIndex, IO],\n request_options: Optional[_models.RequestOptions] = None,\n **kwargs: Any\n ) -> _models.SearchIndex:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = case_insensitive_dict(kwargs.pop(\"headers\", {}) or {})\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n api_version: str = kwargs.pop(\"api_version\", _params.pop(\"api-version\", self._config.api_version))\n content_type: Optional[str] = kwargs.pop(\"content_type\", _headers.pop(\"Content-Type\", None))\n cls: ClsType[_models.SearchIndex] = kwargs.pop(\"cls\", None)\n\n _x_ms_client_request_id = None\n if request_options is not None:\n _x_ms_client_request_id = request_options.x_ms_client_request_id\n content_type = content_type or \"application/json\"\n _json = None\n _content = None\n if isinstance(index, (IOBase, bytes)):\n _content = index\n else:\n _json = self._serialize.body(index, \"SearchIndex\")\n\n request = build_create_request(\n x_ms_client_request_id=_x_ms_client_request_id,\n api_version=api_version,\n content_type=content_type,\n json=_json,\n content=_content,\n template_url=self.create.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n path_format_arguments = {\n \"endpoint\": self._serialize.url(\"self._config.endpoint\", self._config.endpoint, \"str\", skip_quote=True),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.SearchError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n deserialized = self._deserialize(\"SearchIndex\", pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def create_or_update(\n self,\n index_name: str,\n prefer: Union[str, _models.Enum0],\n index: _models.SearchIndex,\n allow_index_downtime: Optional[bool] = None,\n if_match: Optional[str] = None,\n if_none_match: Optional[str] = None,\n request_options: Optional[_models.RequestOptions] = None,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.SearchIndex:", "def create_indexes(self) -> None:\n self.collection.create_index(\"traceId\")\n self.collection.create_index(\"process.serviceName\")", "def AddIndex(self, target):\n if \"w\" not in self.mode:\n raise IOError(\"FileStoreImage %s is not in write mode.\", self.urn)\n 
predicate = (\"index:target:%s\" % target).lower()\n data_store.DB.MultiSet(self.urn, {predicate: target}, token=self.token,\n replace=True, sync=False)", "def create_index(self, table_name, index, timeout):\n _abstract()", "def create_index(self, table_name, index, timeout):\n _abstract()", "def from_index(cls, index):\n return cls(name=index.name or None,\n fields=index.fields)", "def test_track_index(self, collection):\n collection.create_index(\"hello\", unique=True)\n collection.insert_many([{\"hello\": \"here\"}, {\"hello\": 2}])\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,), (2,), (3,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",), (\"here\",), (2,)}),\n }", "def _create_update_index(self) -> Result[Ok, Err]:\n collection_status = self.collection\n if collection_status.is_err():\n return collection_status\n collection: MongoCollection = collection_status.ok()\n\n def check_index_keys(current_keys, new_index_keys):\n current_keys.sort()\n new_index_keys.sort()\n return current_keys == new_index_keys\n\n syft_obj = self.settings.object_type\n\n unique_attrs = getattr(syft_obj, \"__attr_unique__\", [])\n object_name = syft_obj.__canonical_name__\n\n new_index_keys = [(attr, ASCENDING) for attr in unique_attrs]\n\n try:\n current_indexes = collection.index_information()\n except BaseException as e:\n return Err(str(e))\n index_name = f\"{object_name}_index_name\"\n\n current_index_keys = current_indexes.get(index_name, None)\n\n if current_index_keys is not None:\n keys_same = check_index_keys(current_index_keys[\"key\"], new_index_keys)\n if keys_same:\n return Ok()\n\n # Drop current index, since incompatible with current object\n try:\n collection.drop_index(index_or_name=index_name)\n except Exception:\n return Err(\n f\"Failed to drop index for object: {object_name} with index keys: {current_index_keys}\"\n )\n\n # If no new indexes, then skip index creation\n if len(new_index_keys) == 0:\n return Ok()\n\n try:\n collection.create_index(new_index_keys, unique=True, name=index_name)\n except Exception:\n return Err(\n f\"Failed to create index for {object_name} with index keys: {new_index_keys}\"\n )\n\n return Ok()" ]
[ "0.7036111", "0.619484", "0.60897195", "0.6011374", "0.59855175", "0.5938999", "0.58643657", "0.58643657", "0.5846917", "0.58329785", "0.5789435", "0.57819605", "0.5722402", "0.5661722", "0.56147593", "0.55916554", "0.5569049", "0.5557823", "0.5556489", "0.551957", "0.5513929", "0.5500406", "0.5485594", "0.5458454", "0.5452821", "0.54440403", "0.54440403", "0.54114133", "0.53858054", "0.5376044" ]
0.66456324
1
Return the first matching record, or None. Like ``next(itervalues(), default)``.
def find(self, key=None, lo=None, hi=None, reverse=None, include=False, txn=None, rec=None, default=None): it = self.values(key, lo, hi, reverse, None, include, txn, rec) v = next(it, default) if v is default and rec and default is not None: v = Record(self.coll, default) return v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self, **opts):\n try:\n return next(self.find(**opts))\n except StopIteration:\n if 'default' in opts:\n return opts['default']\n else:\n raise KeyError(\"no matching objects\")", "def find(self, args=None, lo=None, hi=None, reverse=None, include=False,\n txn=None, rec=None, default=None):\n it = self.values(args, lo, hi, reverse, None, include, txn, rec)\n v = next(it, default)\n if v is default and rec and default is not None:\n v = Record(self.coll, default)\n return v", "def peek(self, key, default=None):\n try:\n return iter(self.get(key, [])).next()\n except StopIteration:\n return default", "def getfirst(self, key, default=None):\n \n values = self.getlist(key)\n return values[0] if values else default", "def get(self, x, txn=None, rec=None, default=None):\n for tup in self.items(lo=x, hi=x, include=False, rec=rec):\n return tup[1]\n if rec and default is not None:\n return Record(self.coll, default)\n return default", "def first(self, func: Callable[[T], bool], default=None, raise_exception: bool=True) -> Optional[T]:\n if raise_exception:\n return next(iter(filter(func, self.array)))\n return next(iter(filter(func, self.array)), default)", "def value(\n self, key: _K = 0, default: t.Optional[object] = None\n ) -> t.Any:\n try:\n index = self.index(key)\n except (IndexError, KeyError):\n return default\n else:\n return self[index]", "def first(self):\n try:\n return self.next()\n except StopIteration:\n return None", "def first(sequence, default=Ellipsis):\n if default is Ellipsis:\n return next(iter(sequence))\n else:\n return next(iter(sequence), default)", "def fetchone(self):\n try:\n return next(self._results)\n except StopIteration:\n return None", "def FirstTrue(values, default=None):\n for value in values:\n if value:\n return value\n return default", "def find_one_bykey(cls, keydict, defaultval = None):\n return cls.dbm().modelclass_find_one_bykey(cls, keydict, defaultval)", "def first(self):\n # we need to create a new object to insure we don't corrupt the generator count\n csvsource = CSVSource(self.source, self.factory, self.key())\n try:\n item = csvsource.items().next()\n return item\n except StopIteration:\n return None", "def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):\n tab = idl_.tables[table]\n for r in tab.rows.values():\n if getattr(r, column) == match:\n return r\n if default is not _NO_DEFAULT:\n return default\n raise None", "def any_item(seq, default=None, sort=True):\n if seq is None:\n return default\n if isinstance(seq, (list, tuple)):\n return seq[0] if seq else default\n if isinstance(seq, (str, unicode)):\n return seq\n if hasattr(seq, '__iter__'):\n if sort:\n items = sorted(seq)\n return items[0] if items else default\n else:\n return next(iter(seq), default)\n return seq", "def get(self, key, default=None):\n def find(found_item, _):\n \"\"\" This is the closer function which will be passed to find by key function , if key found than return the value \n otherwise return blanck\"\"\"\n if found_item:\n return found_item[1]\n else:\n return default\n\n return self._find_by_key(key, find)", "def first(self):\n try:\n data = self.get_cursor()[0]\n return self.from_(**self.prepare_data(data))\n except IndexError:\n return None", "async def next(self, default=NO_ITEM) -> typing.Any:\n try:\n return await self.__anext__()\n except StopAsyncIteration:\n if default == NO_ITEM:\n raise\n\n return default", "def get_first_item(videos):\n\n return next(iter(videos or []), None)", "def last(iterable, *default):\n\tassert 
len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x", "def first(self) -> Optional[T]:\n if len(self.entry_finder) == 0:\n return None\n for (_, _, (item,)) in self.priority_queue:\n if item is not None:\n return cast(T, item)\n return None", "def first(data, key):\n for i in data:\n if key(i):\n return i\n return None", "def first(items):\n return next(iter(items or []), None)", "def find_one(cls, *a, **ka):\n try:\n return cls.find(*a, **ka).next()\n except StopIteration:\n raise KeyError", "def first(l):\n return next(iter(l), None)", "def first(collection):\n return next(iter(collection))", "def first(collection):\n return next(iter(collection))", "def get(self, key: str, default: t.Optional[object] = None) -> t.Any:\n try:\n index = self.__keys.index(str(key))\n except ValueError:\n return default\n if 0 <= index < len(self):\n return self._super_getitem_single(index)\n else:\n return default", "def _first(self, \n iterable, \n condition=lambda x: True):\n try:\n return next(x for x in iterable if condition(x))\n except:\n return None", "def get(self, key: str, default=None) -> Any:\n try:\n return self[key][0]\n except KeyError:\n return default" ]
[ "0.7420567", "0.729406", "0.70402557", "0.70098037", "0.6747233", "0.6573423", "0.65181947", "0.64708954", "0.6462105", "0.64256006", "0.6267125", "0.6262828", "0.62594527", "0.62209195", "0.62129086", "0.6191699", "0.6148562", "0.6146063", "0.61223197", "0.609467", "0.6093235", "0.6092012", "0.6024453", "0.6024139", "0.601456", "0.60118395", "0.60118395", "0.6001665", "0.5999214", "0.5976689" ]
0.7424811
0
Fetch a record given its key. If `key` is not a tuple, it is wrapped in a 1-tuple. If the record does not exist, return ``None``, or if `default` is provided, return it instead. If `rec` is ``True``, return
def get(self, key, default=None, rec=False, txn=None): key = tuplize(key) it = self._iter(txn, None, key, key, False, None, True, None) tup = next(it, None) if tup: txn_id = getattr(txn or self.engine, 'txn_id', None) obj = self.encoder.unpack(tup[2]) if rec: obj = Record(self, obj, key, tup[0], txn_id, self._index_keys(key, obj)) return obj if default is not None: return Record(self, default) if rec else default return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, x, txn=None, rec=None, default=None):\n for tup in self.items(lo=x, hi=x, include=False, rec=rec):\n return tup[1]\n if rec and default is not None:\n return Record(self.coll, default)\n return default", "def find(self, key=None, lo=None, hi=None, reverse=None, include=False,\n txn=None, rec=None, default=None):\n it = self.values(key, lo, hi, reverse, None, include, txn, rec)\n v = next(it, default)\n if v is default and rec and default is not None:\n v = Record(self.coll, default)\n return v", "def memcacheGetRecord(self, key):\n\n pickled = self.memcacheGet(key)\n return self.unpickleRecord(pickled) if pickled is not None else None", "def get(self, key, default=None):\n def find(found_item, _):\n \"\"\" This is the closer function which will be passed to find by key function , if key found than return the value \n otherwise return blanck\"\"\"\n if found_item:\n return found_item[1]\n else:\n return default\n\n return self._find_by_key(key, find)", "def get(self, key, default=None):\n key = self._validate_key(key)\n sql = u\"\"\"\n SELECT `value` FROM `{table}` WHERE key = ?\n \"\"\".format(table=self.name)\n\n r = self.conn.execute(sql, (key,)).fetchone()\n\n if r:\n return self.convert_out(r['value'])\n\n return default", "def find(self, args=None, lo=None, hi=None, reverse=None, include=False,\n txn=None, rec=None, default=None):\n it = self.values(args, lo, hi, reverse, None, include, txn, rec)\n v = next(it, default)\n if v is default and rec and default is not None:\n v = Record(self.coll, default)\n return v", "def find_one_bykey(cls, keydict, defaultval = None):\n return cls.dbm().modelclass_find_one_bykey(cls, keydict, defaultval)", "def get(self, key: str, default: t.Optional[object] = None) -> t.Any:\n try:\n index = self.__keys.index(str(key))\n except ValueError:\n return default\n if 0 <= index < len(self):\n return self._super_getitem_single(index)\n else:\n return default", "def getfirst(self, key, default=None):\n \n values = self.getlist(key)\n return values[0] if values else default", "def _get_record_from_backend(self, key: str) -> T.Optional[CacheRecord]:\n raise NotImplementedError", "def find_rec(self, _leaf, _key):\n try:\n return True, _leaf.pt[_leaf.keys.index(_key)]\n \n # the key does not exist\n except ValueError:\n return False, \"FAIL FINDING RECORD with KEY: {}\".format(_key)", "async def load(self, category, key, *default, for_update=False):\n stmt = select(self.model)\n\n if for_update:\n stmt = stmt.with_for_update()\n\n if self.category_field:\n stmt = stmt.where(getattr(self.model, self.category_field) == category)\n\n if self.key_field:\n stmt = stmt.where(getattr(self.model, self.key_field) == key)\n\n result = self.session.execute(stmt)\n\n try:\n record = result.scalars().one()\n except NoResultFound:\n if len(default) > 0:\n return default[0]\n else:\n raise KeyError\n\n if self.default_value_field is not None:\n return getattr(record, self.default_value_field)\n else:\n return {\n k: getattr(record, k) for k in self.columns\n }", "async def get_entry(self, key, *, convert=True, as_dict=False):\r\n\r\n query = \"SELECT * FROM {table_name} WHERE {primary_key} = ?\"\r\n cur = await self.data.db.execute(query.format(table_name=self.name, primary_key=self.primary_key.name), [key])\r\n data = await cur.fetchone()\r\n print(data)\r\n if not data:\r\n return []\r\n if convert and as_dict:\r\n raise ArgumentError(\"Incorrect arguments passed. 
only one can be True between arguments (convert, as_dict)\")\r\n converted = self.compile_as_list([data])\r\n if as_dict:\r\n return data\r\n obj = Record(**converted[0])\r\n return obj", "def get(\n self,\n key: str,\n ) -> T.Optional[VALUE]:\n record = self._get_record_from_backend(key)\n if record is None:\n return None\n\n if record.expire:\n now = utc_now()\n if (now.timestamp() - record.update_ts) < record.expire:\n return self.deserialize(record.value)\n else:\n return None\n else:\n return self.deserialize(record.value)", "def get(self, key: str, default=None) -> Any:\n try:\n return self[key][0]\n except KeyError:\n return default", "def gets(self, keys, default=None, rec=False, txn=None):\n return (self.get(x, default, rec, txn) for k in keys)", "def get(self, key, default=None):\r\n return self.data.get(IdentityRef(key),default)", "def get(self, key, default=None):", "def get(self, key, default=None):\n try:\n return self._get(key)\n except Exception:\n return default", "def get(self, key, default=None):\n result = self._get_raw_input().get(key, default)\n return result[0] if isinstance(result, list) else result", "def _single_getitem(self, key):\n try:\n return self._dict[key]\n except KeyError:\n return self.default", "def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except ValueError:\n if default is not None:\n return default\n else:\n raise", "def lookup(key, default=None):\n def _lookup(mapping):\n return mapping.get(key, default)\n return _lookup", "def peek(self, key, default=None):\n try:\n return iter(self.get(key, [])).next()\n except StopIteration:\n return default", "def fetch(self, key: object, default=None):\n return self._user_data.get(key, default)", "def fetch_ele(dic, keys, res_key=None):\n if res_key is None:\n res_key = []\n key = keys.pop()\n res_key.append(key)\n if len(keys) == 0:\n return (\".\".join(res_key), dic[key])\n dic = dic[key]\n return fetch_ele(dic, keys, res_key)", "def get(self, key, group='default', default=None):\n if not self.fp:\n raise Exception(\"Please invoke method setup first!\")\n if group not in self.__cache:\n self._reload_group(group)\n try:\n return self.__cache[group][key]\n except KeyError as e:\n if self.fp.has_option(group, key):\n self.__cache[group][key] = self.fp.get(group, key)\n else:\n self.__cache[group][key] = default\n return self.__cache[group][key]", "def get(self, key, default=None):\n return self._d.get(key, default)", "def get(self, key, default=None):\r\n try:\r\n return self.data[key]()\r\n except (KeyError, SleekRefDied):\r\n return default", "def get_record_by_idx(self, rec_idx):\n return self._record_idx_to_record[rec_idx]['rec']" ]
[ "0.64899474", "0.6472775", "0.6147658", "0.61328465", "0.5905932", "0.57399994", "0.5697584", "0.5677325", "0.5673076", "0.5657559", "0.5626687", "0.5621607", "0.560914", "0.5548576", "0.55469507", "0.55213857", "0.55031437", "0.549219", "0.54708385", "0.5448695", "0.5428925", "0.5418116", "0.5410353", "0.5406126", "0.53918916", "0.5389836", "0.5375286", "0.5367876", "0.53638643", "0.5360378" ]
0.6975982
0
Search the key range lo..hi for individual records, combining them into batches. Returns `(found, made, last_key)` indicating the number of records combined, the number of batches produced, and the last key visited before `max_phys` was exceeded. Batch size is controlled via `max_recs` and `max_bytes`; at least one must not be ``None``. Larger sizes may cause pathological behaviour in the storage engine (for example, space inefficiency). Since batches are fully decompressed before any member may be accessed via
def batch(self, lo=None, hi=None, max_recs=None, max_bytes=None, preserve=True, packer=None, txn=None, max_phys=None, grouper=None): assert max_bytes or max_recs, 'max_bytes and/or max_recs is required.' txn = txn or self.engine packer = packer or self.packer it = self._iter(txn, None, lo, hi, False, None, True, max_phys) groupval = None items = [] for batch, key, data in it: if preserve and batch: self._write_batch(txn, items, packer) else: txn.delete(encode_keys(self.prefix, key)) items.append((key, data)) if max_bytes: _, encoded = self._prepare_batch(items, packer) if len(encoded) > max_bytes: items.pop() self._write_batch(txn, items, packer) items.append((key, data)) done = max_recs and len(items) == max_recs if (not done) and grouper: val = grouper(self.encoder.unpack(data)) done = val != groupval groupval = val if done: self._write_batch(txn, items, packer) self._write_batch(txn, items, packer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_entries_(self, start_key=None, end_key=None):\n # TODO (andrin): fetch a couple of shards instead of just one based on\n # method argument\n current_key = start_key\n if current_key == None:\n current_key = (\"\", )\n limit_shard_name = RecordIOShard.key_name(\n self.name, lo=start_key, hi=end_key).split(SPLIT_CHAR)\n while True:\n shard = RecordIOShard.get_shards_for_key_values(\n self.name, [current_key], keys_only=False).next()[0]\n self.db_search_and_get += 1\n if shard == None:\n raise RecordIOShardDoesNotExistError(self.name)\n hi = shard.lo_hi()[1]\n shard_name = shard.key().name().split(SPLIT_CHAR)\n if (shard_name[6:10] >= limit_shard_name[6:10] and\n (shard_name[2:5] < limit_shard_name[2:5] or\n limit_shard_name[2] == SPLIT_CHAR_AFTER)):\n # Read the whole shard\n for entry in shard:\n yield entry\n else:\n # Read parts of the shard\n for entry in shard.read(current_key, end_key):\n yield entry\n if hi == None:\n # Was the last shard\n return\n current_key = hi\n if (end_key != None and\n RecordIORecords.entry_comperator(current_key, end_key) >= 0):\n # Next shard is after end_key\n return", "def find_hits(records, threshold=15, _result_buffer=None):\n buffer = _result_buffer\n if not len(records):\n return\n samples_per_record = len(records[0]['data'])\n offset = 0\n\n for record_i, r in enumerate(records):\n # print(\"Starting record ', record_i)\n in_interval = False\n hit_start = -1\n\n for i in range(samples_per_record):\n # We can't use enumerate over r['data'], numba gives error\n # TODO: file issue?\n above_threshold = r['data'][i] > threshold\n # print(r['data'][i], above_threshold, in_interval, hit_start)\n\n if not in_interval and above_threshold:\n # Start of a hit\n in_interval = True\n hit_start = i\n\n if in_interval:\n if not above_threshold:\n # Hit ends at the start of this sample\n hit_end = i\n in_interval = False\n\n elif i == samples_per_record - 1:\n # Hit ends at the *end* of this sample\n # (because the record ends)\n hit_end = i + 1\n in_interval = False\n\n if not in_interval:\n # print('saving hit')\n # Hit is done, add it to the result\n if hit_end == hit_start:\n print(r['time'], r['channel'], hit_start)\n raise ValueError(\n \"Caught attempt to save zero-length hit!\")\n res = buffer[offset]\n res['left'] = hit_start\n res['right'] = hit_end\n res['time'] = r['time'] + hit_start * r['dt']\n # Note right bound is exclusive, no + 1 here:\n res['length'] = hit_end - hit_start\n res['dt'] = r['dt']\n res['channel'] = r['channel']\n res['record_i'] = record_i\n\n # Yield buffer to caller if needed\n offset += 1\n if offset == len(buffer):\n yield offset\n offset = 0\n\n # Clear stuff, just for easier debugging\n # hit_start = 0\n # hit_end = 0\n yield offset", "def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several 
sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths", "def items(self, key=None, lo=None, hi=None, reverse=False, max=None,\n include=False, txn=None, rec=None):\n txn_id = getattr(txn or self.engine, 'txn_id', None)\n it = self._iter(txn, key, lo, hi, reverse, max, include, None)\n for batch, key, data in it:\n obj = self.encoder.unpack(data)\n if rec:\n obj = Record(self, obj, key, batch, txn_id,\n self._index_keys(key, obj))\n yield key, obj", "def _binary_search_batch_size(self, params, init_batch_size):\n assert init_batch_size > 0\n low_batch_size = 0\n high_batch_size = None\n batch_size = init_batch_size\n\n # No need to run a warmup or many batches; if it doesn't OOM after 10\n # batches, it should work in general.\n params = params._replace(num_batches=10, num_warmup_batches=0)\n\n # Find high_batch_size first.\n tf.logging.info(\n 'Looking for upper bound to batch size, starting with %d' % batch_size)\n while high_batch_size is None:\n tf.logging.info('Trying batch_size %d' % batch_size)\n params = params._replace(batch_size=batch_size)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n try:\n bench.run()\n low_batch_size = batch_size\n batch_size *= 2\n except tf.errors.ResourceExhaustedError:\n high_batch_size = batch_size - 1\n\n # Binary Search\n tf.logging.info(\n 'Max batch size is in range (%d, %d]. Starting binary search to find '\n 'exact max batch size.' % (low_batch_size, batch_size))\n while low_batch_size < high_batch_size:\n batch_size = (low_batch_size + high_batch_size + 1) // 2\n tf.logging.info('Trying batch_size %d' % batch_size)\n params = params._replace(batch_size=batch_size)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n try:\n bench.run()\n low_batch_size = batch_size\n except tf.errors.ResourceExhaustedError:\n high_batch_size = batch_size - 1\n self.report_benchmark(extras={'max_batch_size': low_batch_size})", "def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):\n for batch_i in range(0, len(sources)//batch_size):\n start_i = batch_i * batch_size\n\n # Slice the right amount for the batch\n sources_batch = sources[start_i:start_i + batch_size]\n targets_batch = targets[start_i:start_i + batch_size]\n\n # Pad\n pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))\n pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))\n\n # Need the lengths for the _lengths parameters\n pad_targets_lengths = []\n for target in pad_targets_batch:\n pad_targets_lengths.append(len(target))\n\n pad_source_lengths = []\n for source in pad_sources_batch:\n pad_source_lengths.append(len(source))\n\n yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths", "def mem_fit(left, right, key, how='inner'):\n rows = PartitionRunner.merge_size(left, right, key, how=how)\n cols = len(right.columns) + (len(right.columns) - (len(key) if isinstance(key, list) else 1))\n required_memory = (rows * cols) * np.dtype(np.float64).itemsize\n return required_memory <= psutil.virtual_memory().available", "def addbatch(self, search, batch, limit, minscore):\n\n for x, result in enumerate(search([data for _, data in batch], limit)):\n # Get input node id\n x, _ = batch[x]\n\n # Add edges for 
each input uid and result uid pair that meets specified criteria\n for y, score in result:\n if x != y and score > minscore and not self.hasedge(x, y):\n self.addedge(x, y, weight=score)", "def build_map(chunk_start, result, total_chunks, start_id, end_id):\n size = len(chunk_start)\n for i in prange(size):\n beg = chunk_start[i]\n end = chunk_start[i + 1] if i < size - 1 else total_chunks\n if start_id < end and beg < end_id: # [beg, end) intersect [start_id, end_id)\n result[max(beg - start_id, 0) : (end - start_id), 0] = beg\n result[max(beg - start_id, 0) : (end - start_id), 1] = end", "def return_matches(self, hashes, batch_size: int=1000):\n # Create a dictionary of hash => offset pairs for later lookups\n mapper = {}\n for hsh, offset in hashes:\n if hsh in mapper.keys():\n mapper[hsh].append(offset)\n else:\n mapper[hsh] = [offset]\n\n values = list(mapper.keys())\n\n # in order to count each hash only once per db offset we use the dic below\n dedup_hashes = {}\n\n results = []\n\n for index in range(0, len(values), batch_size):\n # Create our IN part of the query\n # query = self.SELECT_MULTIPLE % ', '.join([self.IN_MATCH] * len(values[index: index + batch_size]))\n\n res = self.elasticClient.search(index=\"fingerprints\",\n body={\"query\": {\"terms\": {\"doc.hash\": values[index: index+batch_size]}}}, size=1000000)\n\n query_res = res['hits']['hits']\n for doc in query_res:\n hsh = doc['_source']['doc']['hash']\n sid = doc['_source']['doc']['song_id']\n offset = doc['_source']['doc']['offset']\n if sid not in dedup_hashes.keys():\n dedup_hashes[sid] = 1\n else:\n dedup_hashes[sid] += 1\n # we now evaluate all offset for each hash matched\n for song_sampled_offset in mapper[hsh]:\n results.append((sid, offset - song_sampled_offset))\n\n return results, dedup_hashes", "def batch(self, order_by=(), offset=0, limit=20, filter=None): \n query = self._query \n if filter:\n query = query.filter(filter)\n if order_by:\n query = query.order_by(order_by)\n #limit and offset must be applied after filter and order_by \n query = query.limit(limit).offset(offset) \n for ob in query:\n ob = contained(ob, self, stringKey(ob))\n yield ob", "def range_read_chunk(self, layer: int, x: int, y: int, z: int,\n n_retries: int = 100, max_block_size: int = 1000000,\n row_keys: Optional[Iterable[str]] = None,\n row_key_filters: Optional[Iterable[str]] = None,\n time_stamp: datetime.datetime = datetime.datetime.max,\n ) -> Union[\n bigtable.row_data.PartialRowData,\n Dict[bytes, bigtable.row_data.PartialRowData]]:\n chunk_id = self.get_chunk_id(layer=layer, x=x, y=y, z=z)\n\n if layer == 1:\n max_segment_id = self.get_segment_id_limit(chunk_id)\n max_block_size = max_segment_id + 1\n else:\n max_segment_id = self.get_max_node_id(chunk_id=chunk_id)\n\n # Define BigTable keys\n start_id = self.get_node_id(np.uint64(0), chunk_id=chunk_id)\n end_id = self.get_node_id(max_segment_id, chunk_id=chunk_id)\n try:\n rr = self.range_read(start_id, end_id, n_retries=n_retries,\n max_block_size=max_block_size,\n row_keys=row_keys,\n row_key_filters=row_key_filters,\n time_stamp=time_stamp)\n except:\n raise Exception(\"Unable to consume range read: \"\n \"[%d, %d, %d], l = %d, n_retries = %d\" %\n (x, y, z, layer, n_retries))\n return rr", "def batch_by_size(\r\n self,\r\n indices,\r\n max_tokens=None,\r\n max_sentences=None,\r\n required_batch_size_multiple=1,\r\n ):\r\n from fairseq.data import data_utils\r\n\r\n fixed_shapes = self.get_batch_shapes()\r\n if fixed_shapes is not None:\r\n\r\n def adjust_bsz(bsz, 
num_tokens):\r\n if bsz is None:\r\n assert max_tokens is not None, \"Must specify --max-tokens\"\r\n bsz = max_tokens // num_tokens\r\n if max_sentences is not None:\r\n bsz = min(bsz, max_sentences)\r\n elif (\r\n bsz >= required_batch_size_multiple\r\n and bsz % required_batch_size_multiple != 0\r\n ):\r\n bsz -= bsz % required_batch_size_multiple\r\n return bsz\r\n\r\n fixed_shapes = np.array(\r\n [\r\n [adjust_bsz(bsz, num_tokens), num_tokens]\r\n for (bsz, num_tokens) in fixed_shapes\r\n ]\r\n )\r\n\r\n try:\r\n num_tokens_vec = self.num_tokens_vec(indices).astype('int64')\r\n except NotImplementedError:\r\n num_tokens_vec = None\r\n\r\n return data_utils.batch_by_size(\r\n indices,\r\n num_tokens_fn=self.num_tokens,\r\n num_tokens_vec=num_tokens_vec,\r\n max_tokens=max_tokens,\r\n max_sentences=max_sentences,\r\n required_batch_size_multiple=required_batch_size_multiple,\r\n fixed_shapes=fixed_shapes,\r\n )", "def next_batch(self, batch_size=8):\n if not self.db:\n raise AssertionError(\"Database not set. Please call setup_read() before calling next_batch().\")\n\n assert self.f[self.label_key].shape[0] == self.f[self.image_key].shape[0]\n\n if self.row_idx + batch_size > self.f[self.label_key].shape[0]:\n self.row_idx = 0\n\n start_idx = self.row_idx\n self.row_idx += batch_size\n\n if self.randomize_access:\n perm = np.sort(self.permutation[start_idx:start_idx + batch_size]).tolist()\n excerpt = self.f[self.image_key][perm], self.f[self.label_key][perm]\n else:\n excerpt = self.f[self.image_key][start_idx:start_idx + batch_size], self.f[self.label_key][\n start_idx:start_idx + batch_size]\n\n return excerpt", "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "def _build_chunk_registry(self, backend_key, dtype):\n\n query = backend_key_to_query(backend_key)\n chunks_registry = self.db[self.col_name].find(\n {**query, 'provides_meta': False},\n {\"chunk_i\": 1, \"data\": 1})\n\n # We are going to convert this to a dictionary as that is\n # easier to lookup\n for doc in chunks_registry:\n chunk_key = doc.get('chunk_i', None)\n if chunk_key is None:\n # Should not happen because of the projection in find\n # but let's double-check:\n raise ValueError(\n f'Projection failed, got doc with no \"chunk_i\":\\n{doc}')\n # Update our registry with this chunks info. Use chunk_i as\n # chunk_key. Make it a *string* to avoid potential key-error\n # issues or json-encoding headaches.\n chunk_len = len(doc.get('data', []))\n result = np.zeros(chunk_len, dtype=dtype)\n for key in np.dtype(dtype).names:\n result[key] = [dd[key] for dd in doc['data']]\n self.chunks_registry[backend_key + str(chunk_key)] = result\n del doc\n\n # Some bookkeeping to make sure we don't buffer too much in this\n # backend. 
We still need to return at least one hence the 'and'.\n # See: https://github.com/AxFoundation/strax/issues/346\n if backend_key not in self._buffered_backend_keys:\n self._buffered_backend_keys.append(backend_key)\n while (\n (len(self._buffered_backend_keys) > 1 and\n sum(ch.nbytes for ch in self.chunks_registry.values()) / 1e6 > self._buff_mb)\n or len(self._buffered_backend_keys) > self._buff_nruns\n ):\n self._clean_first_key_from_registry()", "def get_all_batches(self, query, bind_vars=None, batch_size=1000):\n write_ops = [\"INSERT\", \"UPDATE\", \"REPLACE\", \"REMOVE\", \"UPSERT\"]\n if any(ele in query.upper() for ele in write_ops):\n raise C8QLGetAllBatchesError(\n \"Write operations provided in the query. Only read operations can be provided\"\n )\n\n cursor = self.execute(\n query=query, bind_vars=bind_vars, batch_size=batch_size, stream=True\n )\n while cursor.has_more():\n cursor.fetch()\n\n result = clean_doc(cursor.batch())\n return result", "def concave_max_binsearch(fn, lb, ub, num_iter=20):\n mid = tf.stop_gradient(.5 * lb + .5 * ub)\n f_mid = fn(mid)\n\n for _ in range(num_iter):\n # Calculate quartiles.\n lq = tf.stop_gradient(.75 * lb + .25 * ub)\n uq = tf.stop_gradient(.25 * lb + .75 * ub)\n f_lq = fn(lq)\n f_uq = fn(uq)\n\n # Identify three cases, recalling that fn is concave.\n # Case 1: f_lq > f_mid > f_uq\n # The maximum occurs in the range [lb, mid].\n # Case 2: f_lq > f_mid > f_uq\n # The maximum occurs in the range [mid, ub].\n # Case 3: f_lq < f_mid > f_uq\n # The maximum occurs in the range [lq, uq].\n case1 = f_lq > f_mid\n case2 = f_uq > f_mid\n lb, ub, mid, f_mid = (\n tf.where(case1, lb, tf.where(case2, mid, lq)),\n tf.where(case1, mid, tf.where(case2, ub, uq)),\n tf.where(case1, lq, tf.where(case2, uq, mid)),\n tf.where(case1, f_lq, tf.where(case2, f_uq, f_mid))\n )\n\n return mid, f_mid", "def get_batch(self, per_name_size):\n start = dict()\n end = dict()\n for name in cell_names:\n start[name] = self._index_in_epoch[name]\n self._index_in_epoch[name] += per_name_size\n if self._index_in_epoch[name] > self._images[name].shape[0]:\n # Finished epoch\n self._epochs_completed[name] += 1\n # Shuffle the data\n perm = np.arange(self._images[name].shape[0])\n np.random.shuffle(perm)\n self._images[name] = self._images[name][perm]\n self._labels[name] = self._labels[name][perm]\n # Start next epoch\n start[name] = 0\n self._index_in_epoch[name] = per_name_size\n assert per_name_size <= self._images[name].shape[0]\n end[name] = self._index_in_epoch[name]\n return start, end", "def batch_gen(self, gen, allow_partial=True):\n # Get N records. 
We flatten the returned generator to\n # a list because we need to reuse it.\n while True:\n s = list(itertools.islice(gen, self.batch_size))\n if not len(s) or (not allow_partial and len(s) != self.batch_size):\n return\n yield (b''.join([x[0] for x in s]), b''.join([x[1] for x in s]),\n b''.join([x[2] for x in s]), b''.join([x[3] for x in s]),\n b''.join([x[4] for x in s]))", "def get_batch(self, idxs):\r\n return self.data[(self.start + idxs) % self.maxlen]", "def get_items(limit=100, last_key=None):\n\n items = []\n list_key = None\n client = boto3.client('dynamodb', region_name='us-east-1')\n\n args = {\n 'TableName': 'landsat',\n 'Limit': limit\n }\n\n if last_key:\n args['ExclusiveStartKey'] = last_key\n\n response = client.scan(**args)\n\n if response['Count'] > 0:\n for item in response['Items']:\n items.append(json.loads(item['body']['S']))\n\n return (items, response['LastEvaluatedKey'])\n else:\n raise Execption('No record found')", "def fetch_things2(query, chunk_size = 100, batch_fn = None):\r\n orig_rules = deepcopy(query._rules)\r\n query._limit = chunk_size\r\n items = list(query)\r\n done = False\r\n while items and not done:\r\n #don't need to query again at the bottom if we didn't get enough\r\n if len(items) < chunk_size:\r\n done = True\r\n\r\n if batch_fn:\r\n items = batch_fn(items)\r\n\r\n for i in items:\r\n yield i\r\n\r\n if not done:\r\n query._rules = deepcopy(orig_rules)\r\n query._after(i)\r\n items = list(query)", "def do_records(self, batches: List[Batch]) -> Iterator[Tuple[str, str]]:\n if any(type(b) not in [Batch, BatchAppendable] for b in batches):\n raise AssertionError()\n\n if self.doSort:\n generators = [\n ((str(r.header), str(r.seq)) for r in b.sorted(self.doSmart))\n for b in batches\n if type(None) != type(b)\n ]\n\n else:\n generators = [\n ((str(r.header), str(r.seq)) for r in b.record_gen(self.doSmart))\n for b in batches\n if type(None) is not type(b)\n ]\n\n yield from merge(*generators, key=lambda x: x[1])", "def key_lookup_batch(self, batchiter):\n pass", "def estimate_num_spill_files(num_words, key_num_bytes, value_num_bytes, mapreduce_task_io_sort_mb, mapreduce_map_sort_spill_percent):\n # extra bytes added when each (k,v) pair is added to output buffer\n KEY_VALUE_META_DATA_NUM_BYTES = 16\n\n key_len_num_bytes = zero_compress.size_of_zero_compressed_int64(key_num_bytes)\n value_len_num_bytes = zero_compress.size_of_zero_compressed_int64(value_num_bytes)\n\n return math.ceil((num_words * (KEY_VALUE_META_DATA_NUM_BYTES + key_len_num_bytes + key_num_bytes + value_len_num_bytes + value_num_bytes)) /\n (util.MiB_to_bytes(mapreduce_task_io_sort_mb) * mapreduce_map_sort_spill_percent))", "def get_chunks_result(self, data_keys: List[str], fetch_only: bool = False) -> List:", "def _index_group_with_subgroup(self, **kwargs):\n\n log.setLevel(self.log_level)\n # get a list of all the uri to index\n uri_list = kwargs.get('uri_list', self.get_uri_list())\n if not uri_list:\n log.info(\"0 items to index\")\n return\n # results = results[:100]\n # Start processing through uri\n batch_file = os.path.join(CFG.dirs.logs, \"batch_list.txt\")\n # with open(batch_file, \"w\") as fo:\n # fo.write(\"{\")\n log.info(\"'%s' items to index\", len(uri_list))\n self.time_start = datetime.datetime.now()\n batch_size = kwargs.get(\"batch_size\", 12000)\n if len(uri_list) > batch_size:\n batch_end = batch_size\n else:\n batch_end = len(uri_list)\n batch_start = 0\n batch_num = 1\n self.batch_data = {}\n self.batch_data[batch_num] = {}\n 
self.batch_data[batch_num]['main'] = []\n self.batch_uris = {}\n self.batch_uris[batch_num] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n end = False\n last = False\n final_list = []\n expand_index = kwargs.get(\"expand_index\", True)\n while not end:\n log.debug(\"batch %s: %s-%s\", batch_num, batch_start, batch_end)\n sub_batch = []\n j = 0\n for i in range(batch_start, batch_end):\n # for i, subj in enumerate(uri_list[batch_start:batch_end]):\n qry_size = kwargs.get(\"qry_size\", 1000)\n if j < qry_size:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n if j == qry_size -1 or i == batch_end - 1:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n # with open(batch_file, \"a\") as fo:\n # fo.write(json.dumps({str('%s-%s' % (batch_num, i+1)):\n # [item[0].sparql\n # for item in sub_batch]})[1:-1]+\",\\n\")\n if not kwargs.get(\"no_threading\", False):\n th = threading.Thread(name=batch_start + i + 1,\n target=self._index_sub,\n args=(sub_batch,\n i+1,\n batch_num,))\n th.start()\n else:\n self._index_sub(sub_batch, i+1, batch_num)\n j = 0\n final_list += sub_batch\n sub_batch = []\n else:\n j += 1\n log.debug(datetime.datetime.now() - self.time_start)\n if not kwargs.get(\"no_threading\", False):\n main_thread = threading.main_thread()\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n action_list = []\n for key, items in self.batch_data[batch_num].items():\n if key == 'main':\n es_worker = self.es_worker\n else:\n es_worker = self.other_indexers[key]\n action_list += es_worker.make_action_list(items)\n result = self.es_worker.bulk_save(action_list)\n final_list += self.batch_uris[batch_num]\n self._update_triplestore(result, action_list)\n del action_list\n del self.batch_uris[batch_num]\n del self.batch_data[batch_num]\n try:\n del pyrdf.memorized\n pyrdf.memorized = {}\n except AttributeError:\n pass\n while gc.collect() > 0:\n pass\n # pdb.set_trace()\n batch_end += batch_size\n batch_start += batch_size\n if last:\n end = True\n if len(uri_list) <= batch_size:\n batch_end = len(uri_list)\n last = True\n batch_num += 1\n self.batch_uris[batch_num] = []\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n log.debug(datetime.datetime.now() - self.time_start)\n # with open(batch_file, 'rb+') as fo:\n # fo.seek(-2, os.SEEK_END)\n # fo.truncate()\n # # fo.close()\n # fo.write(\"}\".encode())", "def get_next_batches(fdataloader: sy.FederatedDataLoader, nr_batches: int):\n batches = {}\n for worker_id in fdataloader.workers:\n worker = fdataloader.federated_dataset.datasets[worker_id].location\n batches[worker] = []\n try:\n for i in range(nr_batches):\n next_batches = next(fdataloader)\n for worker in next_batches:\n batches[worker].append(next_batches[worker])\n except StopIteration:\n pass\n return batches", "def generate_batch_doc2VecC_tail(doc_ids, word_ids, doc_len, batch_size, window_size, sample_size):\n data_index = 0\n assert batch_size % window_size == 0\n span = window_size + 1\n buffer = collections.deque(maxlen=span)\n buffer_doc = collections.deque(maxlen=span)\n batches = np.ndarray(shape=(batch_size, window_size + 1), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n batch_doc = np.ndarray(shape=(batch_size, sample_size), dtype=np.int32)\n mask = [1] * span\n mask[-1] = 0\n i = 0\n\n while 
data_index < len(word_ids):\n if len(set(buffer_doc)) == 1 and len(buffer_doc) == span:\n doc_id = buffer_doc[-1]\n batches[i, :] = list(compress(buffer, mask)) + [doc_id]\n labels[i, 0] = buffer[-1]\n batch_doc[i, :] = random.sample(word_ids[doc_len[doc_id]:doc_len[doc_id + 1]],\n sample_size)\n i += 1\n buffer.append(word_ids[data_index])\n buffer_doc.append(doc_ids[data_index])\n data_index = (data_index + 1) % len(word_ids)\n if i == batch_size:\n yield batches, labels, batch_doc" ]
[ "0.5377973", "0.5250813", "0.52094275", "0.51410776", "0.5034519", "0.5006585", "0.49998978", "0.49376148", "0.49375245", "0.4912089", "0.49118918", "0.48698753", "0.486148", "0.4849708", "0.48431614", "0.48383713", "0.48327553", "0.4824959", "0.48236606", "0.4813737", "0.48078653", "0.47970852", "0.479173", "0.4791252", "0.478669", "0.4779648", "0.47786647", "0.47730038", "0.47387236", "0.47353896" ]
0.6473588
0
Delete a record value without knowing its key. The deleted record is returned, if it existed.
def delete_value(self, val, txn=None): assert self.derived_keys return self.delete(self.key_func(val), txn)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, key):\n if key not in self.db:\n raise LookupError(\"No record for key \\\"%s\\\" exists.\" % key)\n\n record = self.db[key]\n del self.db[key]\n return record", "def delete(self, record):\n temp = self.hashing(record.get_key())\n if self.__buckets[temp].contains(record):\n self.__buckets[temp].delete(record)\n self.__num_records -= 1", "def delete_record(self, key):\n del self._records[key]", "def delete(self, value):\n pass", "def delete(self):\n return Delete(\n From=self.table,\n Where=self._primaryKeyComparison(self._primaryKeyValue())\n ).on(self.transaction, raiseOnZeroRowCount=NoSuchRecord)", "def delete(self, key):\n visitor = VisitorDelete()\n\n self.visit(key, visitor)\n\n if (visitor.result):\n self.size -= 1\n\n return visitor.result", "def delete(table_name, record_id):\n with get_connection() as conn:\n return rethink.table(table_name).get(record_id).delete().run(conn)", "def _delete(self, key):\n return self._store.delete(key)", "def pop(cls, transaction, *primaryKey):\n return cls._rowsFromQuery(\n transaction,\n Delete(\n Where=cls._primaryKeyComparison(primaryKey),\n From=cls.table,\n Return=list(cls.table)\n ),\n lambda: NoSuchRecord()\n ).addCallback(lambda x: x[0])", "def _delete_key(self):\n return self.connection.delete(self.key)", "def delete(self):\n result = self.where({self.__class__.get_primary():self.primary})\n return result.delete()[0]", "def deindex_value(self, value):\n if value:\n value = value.decode('utf-8')\n key = self.index_key(value)\n return self.connection.srem(key, self._instance.get_pk())\n else:\n return True # True?", "async def deleted(self, value):\n pass", "def delete(self, key):\n if key in self._datastore:\n del self._datastore[key]\n return True\n else:\n raise KeyError(\n \"Tried to delete a non existing record\"\n )", "def remove(self, val):\n if not val in self.record:\n return False\n index = self.record[val]\n self.data[index], self.data[-1] = self.data[-1], self.data[index]\n self.record[self.data[index]] = index\n self.data.pop()\n self.record.pop(val)\n return True", "def delete(self, value):\n if len(self.h) > 1 and value:\n\n # find value\n if value in self.d:\n del_idx = self.d[value]\n else:\n del_idx = None\n\n # delete the thing\n if del_idx:\n\n if del_idx == (len(self.h) - 1):\n # last element\n # [None, 1] -> [None]\n self.d[self.h[del_idx]] = None\n self.h.pop()\n else:\n # nth element\n # [None, 1, .. n] -> [None, .. n]\n self.h[del_idx], self.h[len(self.h) - 1] = self.h[len(self.h) - 1], self.h[del_idx]\n self.d[self.h[del_idx]] = del_idx\n self.d[self.h[len(self.h) - 1]] = None\n self.h.pop()\n self.bubble_down(del_idx)", "def delete(self, value):\n if len(self.h) > 1 and value:\n\n # find value\n if value in self.d:\n del_idx = self.d[value]\n else:\n del_idx = None\n\n # delete the thing\n if del_idx:\n\n if del_idx == (len(self.h) - 1):\n # last element\n # [None, 1] -> [None]\n self.d[self.h[del_idx]] = None\n self.h.pop()\n else:\n # nth element\n # [None, 1, .. n] -> [None, .. 
n]\n self.h[del_idx], self.h[len(self.h) - 1] = self.h[len(self.h) - 1], self.h[del_idx]\n self.d[self.h[del_idx]] = del_idx\n self.d[self.h[len(self.h) - 1]] = None\n self.h.pop()\n self.bubble_down(del_idx)", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def del_value(self):\n return self.list.pop()", "def delete(self, key):\n return None", "def delete(self, key):", "def _delete_key(self):\n return self.connection.hdel(self.key, self.name)", "def del_one(self, table, q_filter=None, fail_on_empty=True):\n try:\n with self.lock:\n for i, _ in self._find(table, self._format_filter(q_filter)):\n break\n else:\n if fail_on_empty:\n raise DbException(\"Not found entry with filter='{}'\".format(q_filter), HTTPStatus.NOT_FOUND)\n return None\n del self.db[table][i]\n return {\"deleted\": 1}\n except Exception as e: # TODO refine\n raise DbException(str(e))", "def delete_value(self, value):\n del self.index[value]", "def delete(self, e):\n if self.search(e):\n self.table[hash(e) % len(self.table)].remove(e)\n else:\n raise IndexError(\"Unknown value\")", "def dal_delete(key):\n global store\n return store.delete(urllib.quote(key))", "def delete_record(self, record):\n self.get_record(zone_id=record.zone.id, record_id=record.id)\n\n del self._zones[record.zone.id][\"records\"][record.id]\n return True", "async def _delete(self, key):\n return self.__delete(key)", "def delete_instance(self):\n return self.delete().filter(**{\n self.get_pk_name(): self.get_pk()}).execute()", "def compare_and_delete(self, key, prev_value=None, prev_index=None):\n params = None\n\n if prev_value is not None:\n params = {\n 'prevValue': prev_value\n }\n\n if prev_index is not None:\n params = {\n 'prevIndex': prev_index\n }\n\n return self._request_key(key, method='delete', params=params)" ]
[ "0.7265244", "0.6861643", "0.6856352", "0.6801288", "0.66860753", "0.66188616", "0.65957475", "0.6586024", "0.6570801", "0.65685195", "0.6555317", "0.6539724", "0.639691", "0.63921463", "0.6376713", "0.6362288", "0.6362288", "0.6340162", "0.6297596", "0.6295939", "0.6291092", "0.6265326", "0.6265237", "0.6231773", "0.6192127", "0.617296", "0.6140074", "0.61384696", "0.6133044", "0.6128332" ]
0.74312586
0
Substitute some points into a Sympy matrix
def matrix_subs(matrix_2x2, point): arr = [] for el in matrix_2x2: arr.append(el.subs(x1, point[0]).subs(x2, point[1])) M = Matrix([[arr[0], arr[1]], [arr[2], arr[3]]]) return M
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def with_matsym(*simplifies):\n from sympy.matrices import MatrixSymbol\n from symplus.setplus import AbstractSet\n def simplify_with_matsym(expr, *args, **kwargs):\n # expand MatrixSymbol as Matrix: A -> [ A[0,0] ,..]\n mats = list(expr.atoms(MatrixSymbol))\n agents = list(Dummy(str(mat)) for mat in mats)\n def protect_var(var, expr):\n return AbstractSet(var.xreplace(dict(zip(mats, agents))), expr)\n expr = expr.replace(AbstractSet, protect_var)\n expr = expr.xreplace(dict((mat, mat.as_explicit()) for mat in mats))\n expr = expr.xreplace(dict(zip(agents, mats)))\n\n # replace MatrixElement as Symbol: A[i,j] -> Aij\n elems = tuple(elem for mat in mats for elem in mat)\n syms = tuple(Dummy(str(e)) for e in elems)\n expr = expr.xreplace(dict(zip(elems, syms)))\n\n # simplify expression\n for simp in simplifies:\n expr = simp(expr, *args, **kwargs)\n\n # replace Symbol as MatrixElement: Aij -> A[i,j]\n expr = expr.xreplace(dict(zip(syms, elems)))\n\n return expr\n return simplify_with_matsym", "def test_sympmat(n):\n I = np.identity(n)\n O = np.zeros_like(I)\n X = np.block([[O, I], [-I, O]])\n res = symplectic.sympmat(n)\n assert np.all(X == res)", "def test_symplectic(self, tol):\n r = 0.543\n phi = 0.123\n S = symplectic.squeezing(r, phi)\n\n # the symplectic matrix\n O = np.array([[0, 1], [-1, 0]])\n\n assert np.allclose(S @ O @ S.T, O, atol=tol, rtol=0)", "def test_symplectic(self, tol):\n r = 0.543\n phi = 0.123\n S = symplectic.expand(symplectic.two_mode_squeezing(r, phi), modes=[0, 2], N=4)\n\n # the symplectic matrix\n O = np.block([[np.zeros([4, 4]), np.identity(4)], [-np.identity(4), np.zeros([4, 4])]])\n\n assert np.allclose(S @ O @ S.T, O, atol=tol, rtol=0)", "def collocation(term_lhs, term_rhs, psi, points):\n N = len(psi[0]) - 1\n A = sym.zeros(N+1, N+1)\n b = sym.zeros(N+1, 1)\n # Wrap psi in Python functions (psi_) rather than expressions\n # so that we can evaluate psi_ at points[i] (alternative to subs?)\n x = sym.Symbol('x')\n psi_ = {}\n module = \"numpy\" if N > 2 else \"sympy\"\n for derivative in psi:\n psi_[derivative] = [sym.lambdify([x], psi[derivative][i],\n modules=\"sympy\")\n for i in range(N+1)]\n print('...evaluating matrix...', end=' ')\n for i in range(N+1):\n for j in range(N+1):\n print('(%d,%d)' % (i, j))\n A[i,j] = term_lhs(psi_, points, i, j)\n b[i,0] = term_rhs(psi_, points, i)\n print()\n\n # Drop symbolic expressions (and symbolic solve) for\n # all but the smallest problems (troubles maybe caused by\n # derivatives of psi that trigger full symbolic expressions\n # in A; this problem is not evident in interpolation in approx1D.py)\n if N > 2:\n A = A.evalf()\n b = b.evalf()\n print('A:\\n', A, '\\nb:\\n', b)\n c = A.LUsolve(b)\n print('coeff:', c)\n u = 0\n for i in range(len(psi_[0])):\n u += c[i,0]*psi_[0][i](x)\n print('approximation:', u)\n return u", "def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None):\r\n # make all lowercase real?\r\n if symtab:\r\n varset = symtab\r\n else:\r\n varset = {'p': sympy.Symbol('p'),\r\n 'g': sympy.Symbol('g'),\r\n 'e': sympy.E,\t\t\t# for exp\r\n 'i': sympy.I,\t\t\t# lowercase i is also sqrt(-1)\r\n 'Q': sympy.Symbol('Q'),\t # otherwise it is a sympy \"ask key\"\r\n 'I': sympy.Symbol('I'),\t # otherwise it is sqrt(-1)\r\n 'N': sympy.Symbol('N'),\t # or it is some kind of sympy function\r\n 'ZZ': sympy.Symbol('ZZ'),\t # otherwise it is the PythonIntegerRing\r\n 'XI': sympy.Symbol('XI'),\t # otherwise it is the capital \\XI\r\n 'hat': sympy.Function('hat'),\t # for unit 
vectors (8.02)\r\n }\r\n if do_qubit:\t\t# turn qubit(...) into Qubit instance\r\n varset.update({'qubit': Qubit,\r\n 'Ket': Ket,\r\n 'dot': dot,\r\n 'bit': sympy.Function('bit'),\r\n })\r\n if abcsym:\t\t\t# consider all lowercase letters as real symbols, in the parsing\r\n for letter in string.lowercase:\r\n if letter in varset:\t # exclude those already done\r\n continue\r\n varset.update({letter: sympy.Symbol(letter, real=True)})\r\n\r\n sexpr = sympify(expr, locals=varset)\r\n if normphase:\t # remove overall phase if sexpr is a list\r\n if type(sexpr) == list:\r\n if sexpr[0].is_number:\r\n ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0])\r\n sexpr = [sympy.Mul(x, ophase) for x in sexpr]\r\n\r\n def to_matrix(expr):\r\n \"\"\"\r\n Convert a list, or list of lists to a matrix.\r\n \"\"\"\r\n # if expr is a list of lists, and is rectangular, then return Matrix(expr)\r\n if not type(expr) == list:\r\n return expr\r\n for row in expr:\r\n if (not type(row) == list):\r\n return expr\r\n rdim = len(expr[0])\r\n for row in expr:\r\n if not len(row) == rdim:\r\n return expr\r\n return sympy.Matrix(expr)\r\n\r\n if matrix:\r\n sexpr = to_matrix(sexpr)\r\n return sexpr", "def resolves_matrix(self):\n self.P = np.linalg.solve(self.M, self.f)", "def to_sympy(self, a):\n raise NotImplementedError", "def from_sympy(self, a):\n raise NotImplementedError", "def matsimp(expr):\n from sympy.simplify.simplify import bottom_up\n\n # do indexing: [.., aij ,..][i,j] -> aij\n expr = do_indexing(expr)\n # deep doit: Trace([.., aij ,..]) -> ..+ aii +..\n expr = bottom_up(expr, lambda e: e.doit())\n\n def mateq_expand(m1, m2):\n if not is_Matrix(m1) and not is_Matrix(m2):\n return Eq(m1, m2)\n if not is_Matrix(m1) or not is_Matrix(m2):\n return false\n if m1.shape != m2.shape:\n return false\n return And(*[Eq(e1, e2) for e1, e2 in zip(m1, m2)])\n\n def matne_expand(m1, m2):\n if not is_Matrix(m1) and not is_Matrix(m2):\n return Ne(m1, m2)\n if not is_Matrix(m1) or not is_Matrix(m2):\n return true\n if m1.shape != m2.shape:\n return true\n return Or(*[Ne(e1, e2) for e1, e2 in zip(m1, m2)])\n\n # expand matrix equation: [.., aij ,..] == [.., bij ,..] -> ..& aij == bij &..\n # [.., aij ,..] != [.., bij ,..] 
-> ..| aij != bij |..\n expr = expr.replace(Eq, mateq_expand)\n expr = expr.replace(Ne, matne_expand)\n\n return expr", "def test_means_changebasis(self):\n means_xp = np.array([1, 2, 3, 4, 5, 6])\n means_symmetric = np.array([1, 4, 2, 5, 3, 6])\n\n assert np.all(symplectic.xxpp_to_xpxp(means_xp) == means_symmetric)\n assert np.all(symplectic.xpxp_to_xxpp(means_symmetric) == means_xp)", "def matrix_spy(self, mtrx):\r\n import matplotlib.pylab as pl\r\n pl.spy(mtrx,precision=0.01, markersize=1)\r\n pl.show()", "def inversion(origin=(0, 0, 0)):\n mat = -np.eye(4)\n mat[3, 3] = 1\n mat[0:3, 3] = 2 * np.array(origin)\n return SymmOp(mat)", "def SymmetriseMatrix(adjmatrix):\n\n if galib.metrics.Reciprocity(adjmatrix) == 1:\n # if Reciprocity(adjmatrix) == 1:\n return adjmatrix\n else:\n return 0.5 * (adjmatrix + adjmatrix.T)", "def matrix_sym_op(x):\n return (x + tf.transpose(x))/2", "def _symbolic_system(self):\n system = sym.Matrix(self._symbolic_equations)\n return system.subs(self._symbolic_change_of_vars)", "def quadratic_expansion(matrix):\n arr = np.copy(matrix)\n arr = np.array([x + x ** 2 for x in arr])\n return np.concatenate((matrix, arr), axis=1)", "def test_tensors_can_substitute_scalars(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n z = IndexedBase('z')\n r = p.R\n i, j, k, l, m = p.R_dumms[:5]\n\n x_def = dr.define(\n x[i], dr.sum((j, r), y[j] * z[i])\n )\n orig = dr.sum((i, r), x[i] ** 2 * x[k])\n\n # k is free.\n expected = dr.sum(\n (i, r), (j, r), (l, r), (m, r),\n z[i] ** 2 * y[j] * y[l] * y[m] * z[k]\n )\n\n # Test different ways to perform the substitution.\n for res in [\n orig.subst(x[i], x_def.rhs, full_balance=full_balance),\n orig.subst_all([x_def], full_balance=full_balance),\n orig.subst_all([(x[i], x_def.rhs)], full_balance=full_balance),\n x_def.act(orig, full_balance=full_balance)\n ]:\n assert res.simplify() == expected.simplify()", "def symmetrize(X):\n\n def to_einstr(ls):\n return \"\".join([chr(ord('a') + l) for l in ls])\n\n D, R = X.shape[0], len(X.shape)\n X_ = np.zeros(X.shape)\n for new_order in it.permutations(xrange(R)):\n # Permute axes with einsum\n X_ += np.einsum('%s->%s'%(to_einstr(range(R)), to_einstr(new_order)), X) / np.math.factorial(R)\n\n return X_", "def interpolate_matrix(matrix):", "def test_expend_not_square(self):\n with pytest.raises(ValueError, match=\"The input matrix is not square\"):\n symplectic.expand_passive(np.ones((3, 2)), [0, 1, 2], 5)", "def test_canonicalization_of_vectors_w_symm(free_alg):\n\n dr = free_alg\n p = dr.names\n x = IndexedBase('x')\n r = p.R\n i, j = p.i, p.j\n\n vs = Vec('vs')\n dr.set_symm(vs, Perm([1, 0]), valence=2)\n tensor = dr.sum((i, r), (j, r), x[i, j] * vs[j, i])\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == x[i, j]\n assert term.vecs == (vs[i, j],)\n\n va = Vec('va')\n dr.set_symm(va, Perm([1, 0], NEG), valence=2)\n tensor = dr.sum((i, r), (j, r), x[i, j] * va[j, i])\n res = tensor.simplify()\n assert res.n_terms == 1\n term = res.local_terms[0]\n assert term.sums == ((i, r), (j, r))\n assert term.amp == -x[i, j]\n assert term.vecs == (va[i, j],)", "def set_rhs(self):\n k = self.istore[:, 0]\n ksym = self.stencil.get_symmetric()[k]\n self.rhs[:] = self.feq[k, np.arange(k.size)] + self.feq[ksym, np.arange(k.size)]", "def set_rhs(self):\n k = self.istore[:, 0]\n ksym = self.stencil.get_symmetric()[k]\n self.rhs[:] = self.feq[k, np.arange(k.size)] + 
self.feq[ksym, np.arange(k.size)]", "def test_special_substitution_of_identity(free_alg):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n t = IndexedBase('y')\n a = IndexedBase('a')\n i, j = p.i, p.j\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, p.R), x[i] * v[i] + a[i])\n ident_def = dr.define(1, dr.einst(t[i] * w[i]))\n\n res = orig.subst_all([ident_def])\n assert dr.simplify(\n res - dr.einst(x[i] * v[i])\n - dr.sum((i, p.R), (j, p.R), a[i] * t[j] * w[j])\n ) == 0", "def stability_function_unexpanded(self):\n import sympy\n z = sympy.var('z')\n s = len(self)\n I = sympy.eye(s)\n\n v = 1 - self.alpha.sum(1)\n vstar = sympy.Matrix(v[:-1])\n v_mp1 = sympy.Rational(v[-1])\n alpha_star = sympy.Matrix(self.alpha[:-1,:])\n beta_star = sympy.Matrix(self.beta[:-1,:])\n alpha_mp1 = sympy.Matrix(self.alpha[-1,:])\n beta_mp1 = sympy.Matrix(self.beta[-1,:])\n p1 = (alpha_mp1 + z*beta_mp1).T*(I-alpha_star-z*beta_star).lower_triangular_solve(vstar)\n p1 = p1[0] + v_mp1\n return p1", "def test_numbers_can_substitute_scalars(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = Range('D', 0, 2)\n i, j, k, l = symbols('i j k l')\n dr.set_dumms(r, [i, j, k, l])\n v = p.v\n\n orig = dr.sum((i, r), x[i] ** 2 * x[j] * y[k] * v[l])\n\n res = orig.subst(x[i], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(x[j], 1, full_balance=full_balance).simplify()\n assert res == dr.sum(2 * y[k] * v[l])\n res = orig.subst(x[k], 2, full_balance=full_balance).simplify()\n assert res == dr.sum(16 * y[k] * v[l])", "def test_symplectic_multimode(self, tol):\n r = [0.543] * 4\n phi = [0.123] * 4\n S = symplectic.squeezing(r, phi)\n\n # the symplectic matrix\n O = symplectic.sympmat(4)\n\n assert np.allclose(S @ O @ S.T, O, atol=tol, rtol=0)", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def new_basis(abc, lattice):\n return np.dot(abc.T, lattice.inv_matrix.T)" ]
[ "0.63315576", "0.6180127", "0.5928219", "0.5822502", "0.57564455", "0.569143", "0.55408376", "0.55001694", "0.5494007", "0.5492283", "0.540052", "0.5382228", "0.536797", "0.53523666", "0.5345227", "0.526552", "0.52366996", "0.51955366", "0.51734364", "0.5167767", "0.5157274", "0.5149801", "0.5149152", "0.5149152", "0.51360273", "0.51205915", "0.5109256", "0.5089682", "0.5077462", "0.5072366" ]
0.6811872
0
Get the Jacobian matrix from a gradient; or two functions in a 1d array
def get_jacobian(gradient): gradient_of_x1 = get_gradient(gradient[0]) fx1x1 = gradient_of_x1[0] fx2x1 = gradient_of_x1[1] gradient_of_x2 = get_gradient(gradient[1]) fx1x2 = gradient_of_x2[0] fx2x2 = gradient_of_x2[1] M = Matrix([[fx1x1, fx2x1], [fx1x2, fx2x2]]) return M
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jacobian(f, x):\n\n B, N = x.shape\n x.requires_grad = True\n in_ = torch.zeros(B, 1)\n \n y = f(in_, x)\n jacobian = list()\n \n for i in range(N):\n v = torch.zeros_like(y)\n v[:, i] = 1.\n dy_i_dx = torch.autograd.grad(y,\n x,\n grad_outputs=v,\n retain_graph=True,\n create_graph=True,\n allow_unused=True)[0] # shape [B, N]\n jacobian.append(dy_i_dx)\n\n jacobian = torch.stack(jacobian, dim=2).requires_grad_()\n\n return jacobian", "def jacobian(f, x, epsilon = 1e-10):\n f_ = f(x)\n value = np.zeros((len(f_), len(x)))\n \n for i in range(len(x)):\n f_ = partial_derivative(f, x, i, epsilon)\n value[:,i] = f_\n\n return value", "def jacobian(self, xs):\n rx_list = []\n for nx,x in enumerate(xs):\n \n numpy.testing.assert_array_almost_equal(self.independentVariableShapeList[nx], numpy.shape(x), err_msg = '\\ntaped xs[%d].shape != forward xs[%d]\\n'%(nx,nx))\n rx = numpy.ravel(x)\n rx_list.append(rx)\n self.x = numpy.concatenate(rx_list)\n return wrapped_functions.jacobian(self.tape_tag, self.x)", "def JacobianFunction(p,x,y,z):\n \n n = len(x)\n \n J = np.array([ np.ones((n)),x,x**2,y,y**2,x*y ])\n \n return J", "def jacobian_func(f):\n jacobian = jacfwd(f)\n return jacobian", "def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx", "def backward(self, y_grad):\n jacobian_diag = np.zeros(shape=(self.y.shape[1],self.y.shape[1]))\n\n jacobian_main = np.zeros(self.y.shape)\n\n\n for k in range(self.y.shape[0]):\n\n for i in range(self.y.shape[1]):\n for j in range(self.y.shape[1]):\n if i == j:\n jacobian_diag[i][j] = self.y[k][i]\n else: \n jacobian_diag[i][j] = 0\n\n jacobian = jacobian_diag - np.dot(np.transpose([self.y[k]]), [self.y[k]])\n dot = np.dot(y_grad,jacobian)\n jacobian_main[k] = dot[k]\n\n return jacobian_main", "def jacobian(self, x1, x2, out=None):\n raise NotImplementedError", "def compute_jacobian(self):\n \n d = len(self.theta)\n n,p = self.b.shape\n \n if not self.quiet:\n print \"Running jacobian computation.\"\n print \"D will be a {}x{}x{} array\".format(p,n,d)\n \n if self.x is None:\n raise ValueError('Can not compute Jacobian. 
self.x is None.')\n \n #print \"n={},n={}\".format(n,d);\n \n D = numpy.zeros((p,n,d))\n \n \n for k in range(d):\n A_k, b_k = self.get_diff_A_b(k)\n \n for i in range(p):\n D[i,:,k] = - self.solver.backsolve(A_k.dot(self.x[:,i]) - b_k[:,i])\n \n return D", "def jacobian(f, x, dx):\n x = np.atleast_1d(x)\n dx = np.atleast_1d(dx)\n nx = len(x)\n ny = 0\n jacobi = None\n e = np.zeros(nx)\n for ix in xrange(nx):\n e *= 0\n e[ix] = 1\n deriv = np.atleast_1d((f(x + e * dx) - f(x - e * dx)) / (2 * dx[ix]))\n if ix == 0:\n ny = len(deriv)\n jacobi = np.empty((ny, nx))\n jacobi[:, ix] = deriv\n return jacobi", "def jacobian(self, x):\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std / self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad", "def _grad_j(q_j, A_j, b_j, b_j_norm, a_1_j, a_2_j, m):\n return (A_j.t() @ q_j / (-m)) + (b_j * (a_1_j / b_j_norm + a_2_j))", "def jacobian_finite_difference(func, arg_index, *args): \n eps = 1e-5\n\n dim_out = func(*args).shape[0]\n dim_in = args[arg_index].shape[0]\n J = np.zeros([dim_out, dim_in], dtype=np.float32)\n\n for i in range(dim_in):\n args0 = [arg.copy() for arg in args]\n args1 = [arg.copy() for arg in args]\n \n args0[arg_index][i] += eps\n args1[arg_index][i] -= eps\n \n f0 = func(*args0)\n f1 = func(*args1)\n \n J[:,i] = (f0-f1) / (2*eps)\n return J", "def jacobian(self,x,y,l,a):\n J = np.zeros([*x.shape,2,2])\n\n J = _jacobian(x,y,l,a,J)\n\n return J", "def jacobian(self, A, B):\r\n\r\n # Compute the derivatives spectrally\r\n A_x_hat = self.calc_derivative(A, 'x')\r\n A_y_hat = self.calc_derivative(A, 'y')\r\n B_x_hat = self.calc_derivative(B, 'x')\r\n B_y_hat = self.calc_derivative(B, 'y')\r\n\r\n # Compute the values in realspace for multiplication\r\n A_x = self.inverse_fft(self.dealias_pad(A_x_hat))\r\n A_y = self.inverse_fft(self.dealias_pad(A_y_hat))\r\n B_y = self.inverse_fft(self.dealias_pad(B_y_hat))\r\n B_x = self.inverse_fft(self.dealias_pad(B_x_hat))\r\n\r\n # Compute the Jacobian\r\n J_canonical = (A_x*B_y) - (B_x*A_y)\r\n\r\n # Return to spectral space the return\r\n return self.dealias_unpad(self.forward_fft(J_canonical))", "def jacobian(expression, wrt, consider_constant=None, disconnected_inputs=\"raise\"):\n\n if not isinstance(expression, Variable):\n raise TypeError(\"jacobian expects a Variable as `expression`\")\n\n if expression.ndim > 1:\n raise ValueError(\n \"jacobian expects a 1 dimensional variable as `expression`.\"\n \" If not use flatten to make it a vector\"\n )\n\n using_list = isinstance(wrt, list)\n using_tuple = isinstance(wrt, tuple)\n\n if isinstance(wrt, (list, tuple)):\n wrt = list(wrt)\n else:\n wrt = [wrt]\n\n if expression.ndim == 0:\n # expression is just a scalar, use grad\n return as_list_or_tuple(\n using_list,\n using_tuple,\n grad(\n expression,\n wrt,\n consider_constant=consider_constant,\n disconnected_inputs=disconnected_inputs,\n ),\n )\n\n def inner_function(*args):\n idx = args[0]\n expr = args[1]\n rvals = []\n for inp in args[2:]:\n rval = grad(\n expr[idx],\n inp,\n consider_constant=consider_constant,\n disconnected_inputs=disconnected_inputs,\n )\n 
rvals.append(rval)\n return rvals\n\n # Computing the gradients does not affect the random seeds on any random\n # generator used n expression (because during computing gradients we are\n # just backtracking over old values. (rp Jan 2012 - if anyone has a\n # counter example please show me)\n jacobs, updates = aesara.scan(\n inner_function,\n sequences=aesara.tensor.arange(expression.shape[0]),\n non_sequences=[expression] + wrt,\n )\n assert not updates, \"Scan has returned a list of updates; this should not happen.\"\n return as_list_or_tuple(using_list, using_tuple, jacobs)", "def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)", "def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx,\n extra_feed_dict):\n # Complex vectors are treated as vectors of twice as many reals.\n if x.dtype.is_complex:\n x_shape = tuple(x_shape) + (2,)\n dy_factor = 2 if dy.dtype.is_complex else 1\n\n # To compute the jacobian, we treat x and y as one-dimensional vectors.\n x_size = _product(x_shape)\n x_val_size = _product(x_shape[1:]) # This is used for sparse gradients\n dy_size = _product(dy_shape) * dy_factor\n\n # Allocate 2-D Jacobian, with x dimensions smashed into the first\n # dimension and y dimensions smashed into the second.\n jacobian = np.zeros((x_size, dy_size),\n dtype=x.dtype.real_dtype.as_numpy_dtype)\n\n # For each of the entry of dy, we set this to be 1 and\n # everything else to be 0 and compute the backprop -- this will give us one\n # one column of the Jacobian matrix.\n dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)\n dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)\n sess = tf.get_default_session()\n for col in range(dy_size):\n dy_data_flat[col] = 1\n if isinstance(dx, tf.IndexedSlices):\n backprop_indices, backprop_values = sess.run(\n [dx.indices, dx.values],\n feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n for i, v in zip(backprop_indices, backprop_values):\n r_begin = i * x_val_size\n r_end = r_begin + x_val_size\n jacobian[r_begin:r_end, col] += v.flat\n else:\n assert isinstance(dx, tf.Tensor), \"dx = \" + str(dx)\n backprop = sess.run(\n dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n jacobian[:, col] = backprop.ravel().view(jacobian.dtype)\n dy_data_flat[col] = 0\n\n # If the output is empty, run the gradients at least once and make sure\n # they produce zeros.\n if not dy_size:\n backprop = sess.run(\n dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))\n if backprop.shape != x_data.shape:\n raise ValueError(\"Empty gradient has wrong shape: expected %s, got %s\" %\n (x_data.shape, backprop.shape))\n if np.any(backprop):\n raise ValueError(\"Empty tensor with nonzero gradients\")\n\n return jacobian", "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. 
/ m * grad", "def jacobian(self, dt):\n return self._F_cache", "def transform_and_compute_jacobian(self, xj):\n x = xj[:, :self.d].detach()\n log_j = xj[:, -1]\n\n x.requires_grad = True\n y = self.flow_(x)\n\n n_batch = xj.shape[0]\n\n jx = torch.zeros(n_batch, self.d, self.d).to(log_j.device)\n directions = torch.eye(self.d).to(log_j).unsqueeze(0).repeat(n_batch, 1, 1)\n\n for i in range(self.d):\n jx[:, i, :] = torch.autograd.grad(y, x, directions[:, i, :],\n allow_unused=True, create_graph=True, retain_graph=True)[0]\n x.requires_grad = False\n x.grad = None\n\n log_det_j = torch.log(torch.abs(torch.det(jx)))\n return torch.cat([y.detach(), (log_j + log_det_j).unsqueeze(1)], 1)", "def jacobian_i(self, x):\n return np.matrix([-x**3, -x**2, -x, -1])", "def jacobian(self, b):\n \n # Substitute parameters in partial derivatives\n subs = [pd.subs(zip(self._b, b)) for pd in self._pderivs]\n # Evaluate substituted partial derivatives for all x-values\n vals = [sp.lambdify(self._x, sub, \"numpy\")(self.xvals) for sub in subs]\n # Arrange values in column-major order\n return np.column_stack(vals)", "def test_jacobian_disconnected_inputs():\r\n v1 = tensor.vector()\r\n v2 = tensor.vector()\r\n jacobian_v = theano.gradient.jacobian(1 + v1, v2,\r\n disconnected_inputs='ignore')\r\n func_v = theano.function([v1, v2], jacobian_v)\r\n val = numpy.arange(4.0).astype(theano.config.floatX)\r\n assert numpy.allclose(func_v(val, val), numpy.zeros((4, 4)))\r\n\r\n s1 = tensor.scalar()\r\n s2 = tensor.scalar()\r\n jacobian_s = theano.gradient.jacobian(1 + s1, s2,\r\n disconnected_inputs='ignore')\r\n func_s = theano.function([s2], jacobian_s)\r\n val = numpy.array(1.0).astype(theano.config.floatX)\r\n assert numpy.allclose(func_s(val), numpy.zeros(1))", "def calc_jacobian(*args, **kwargs):\n try:\n tag = kwargs[\"tag\"]\n except:\n tag = 0\n\n try:\n sparse = kwargs[\"sparse\"]\n except:\n sparse = True\n\n if sparse:\n try:\n shape = kwargs[\"shape\"]\n except:\n raise ValueError(\"'shape' should be passed to calculate sparse jacobian!\")\n\n \n options = np.array([0,0,0,0],dtype=int)\n result = ad.colpack.sparse_jac_no_repeat(tag, *args, options=options)\n nnz = result[0]\n ridx = result[1]\n cidx = result[2]\n values = result[3]\n assert nnz > 0\n jac = sp.csr_matrix((values, (ridx, cidx)), shape=shape)\n jac = jac.toarray()\n else:\n jac = ad.jacobian(tag, *args)\n return jac", "def jacobian(self,var,g=None):\n if (g==None):g=self.g\n jac=np.zeros([self.n+1,self.n])\n for i in range(self.n):\n for j in range(self.n):\n if(i==j): jac[i][j]=2.*(var[i]+1.)-g*np.sum([self.XXZ.Z(i,k) for k in range(self.n) if k!=i])\n else: jac[i][j]=g*self.XXZ.Z(i,j)\n for i in range(self.n):\n jac[self.n][i]=1.\n return jac", "def fd_jacobian(self,y):\n res0 = self.residual(y)\n eps = 1e-6\n dofs = y.shape[0]\n jac_approx = np.zeros((dofs,dofs))\n for i in range(dofs):\n y_temp = np.copy(y)\n y_temp[i]+=eps\n\n r2 = self.residual(y_temp)\n dr = (r2-res0)/eps\n for j in range(dofs):\n jac_approx[j,i] = dr[j]\n \n return jac_approx", "def jacobian(self, x):\n pass", "def jacobian(self, dt):\n raise NotImplementedError", "def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]" ]
[ "0.7145842", "0.68220186", "0.67845017", "0.67727375", "0.6742965", "0.66961354", "0.66790104", "0.65935296", "0.65695715", "0.6567825", "0.6539777", "0.65281737", "0.6505052", "0.6449393", "0.64071274", "0.6402486", "0.63869303", "0.6353686", "0.6327738", "0.6325424", "0.6301552", "0.6300443", "0.627622", "0.62664473", "0.6244105", "0.6241652", "0.62332815", "0.6227386", "0.62079", "0.6197052" ]
0.7826469
0
Simply load the predictions associated with the VERSION data
def load_predict(path=MODEL_PATH, version=VERSION, namePredictor=DEFAULT_PREDICTOR): logging.info("trying to load {}".format(path + namePredictor + version + '.npz')) return np.load(path + namePredictor + version + '.npz')['pred']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_version(cls, unpickler, version):\n model = unpickler.load()\n if version == 0:\n feature = model._state['features']\n model._state['output_column_name'] = 'extracted.' + feature\n return model", "def predict(self, data, version='default'):\n if self.transform_service:\n data = self.transform_service.predict(data, version)\n return self.model_service.predict(data, version)", "def _load(predictions, f):\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction", "def load_predicted_results(self):\n print(\"\\n\\nLoad prediction answers : \")\n with open(\"predicted_results\", \"rb\") as predicted_results:\n self.predicted_results = pickle.load(predicted_results)", "def test_file(self, file_name, version, classifier_type):\n labels = []\n with open(file_name) as f:\n for line in f.readlines():\n print(line,self.predict(line))\n labels.append(self.predict(line))\n \n filename = 'test_results-' + classifier_type + '-' + version + '.txt'\n \n with open(filename, 'w') as f:\n for label in labels:\n f.write(str(label)+\"\\n\")\n \n print (\"Results from \",file_name,\" printed to:\",filename)", "def predict(self, datafile):", "def _load_version(cls, state, version):\n _tkutl._model_version_check(version, cls._PYTHON_IMAGE_CLASSIFIER_VERSION)\n from turicreate.toolkits.classifier.logistic_classifier import LogisticClassifier\n state['classifier'] = LogisticClassifier(state['classifier'])\n state['classes'] = state['classifier'].classes\n\n # Load pre-trained model & feature extractor\n model_name = state['model']\n if model_name == \"VisionFeaturePrint_Screen\" and _mac_ver() < (10,14):\n raise ToolkitError(\"Can not load model on this operating system. 
This model uses VisionFeaturePrint_Screen, \"\n \"which is only supported on macOS 10.14 and higher.\")\n state['feature_extractor'] = _image_feature_extractor._create_feature_extractor(model_name)\n state['input_image_shape'] = tuple([int(i) for i in state['input_image_shape']])\n return ImageClassifier(state)", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def predict(self, data, version='default'):\n return self.skil.api.transformimage(\n deployment_name=self.deployment.name,\n image_transform_name=self.model_name,\n version_name=version,\n files=data\n )", "def predict(project, model, instances, version=None):\n service = discovery.build('ml', 'v1')\n name = model\n\n if version is not None:\n name += f'/versions/{version}'\n\n response = service.projects().predict(\n name=name,\n body={'instances': instances}\n ).execute()\n\n if 'error' in response:\n raise RuntimeError(response['error'])\n\n return response['predictions']", "def _predict_load(self, env_inputs):\n for app in self.system.apps:\n for node in self.system.nodes:\n data = self._load_data[app.id][node.id]\n for step in range(len(env_inputs)):\n data_index = step + self._current_index + 1\n value = None\n if data_index < len(data):\n value = data[data_index]\n else:\n value = self.environment_input.generated_load[app.id][node.id]\n env_inputs[step].generated_load[app.id][node.id] = value\n return env_inputs", "def test_predict_from_file():\n f = open('vw.file.txt', 'w')\n examples = []\n for i in xrange(len(DATA)):\n (value, all_sections) = DATA[i]\n ex = VowpalExample(i, value)\n for (namespace, section) in all_sections.items():\n ex.add_section(namespace, section)\n f.write(str(ex) + '\\n')\n f.close()\n vw = Vowpal(PATH_VW, './vw.%s', {'--passes' : '10' })\n preds = vw.predict_from_file('vw.file.txt')\n for (id, value) in preds:\n print 'prediction for %s is %s' % (id, value)", "def sequence_predict(self, load_script=False, variant=\"predict\"):\n\n if variant != 'internal':\n # Open an existing model and get the input dataset. 
\n # Target for historical data are expected if using previous targets as a feature.\n request_data = self._get_model_and_data(ordered_data=True) \n if type(request_data) == list:\n X, y = request_data\n else:\n X = request_data\n else:\n X = self.X_test.copy()\n y = self.y_test.copy()\n\n # Scale the targets and increase stationarity if required\n if variant != 'internal' and self.model.lag_target and (self.model.scale_target or self.model.make_stationary):\n # If using differencing, we retain original y values for inversing the transformation later\n y_orig = y.values.ravel() if self.model.make_stationary=='difference' else None\n # Apply the transformer to the targets\n y = self.model.target_transformer.transform(y)\n # Drop samples where y cannot be transformed due to insufficient lags\n X = X.iloc[len(X)-len(y):]\n\n # Set the number of periods to be predicted\n prediction_periods = self.model.prediction_periods\n # Set the number of rows required for one prediction\n self.rows_per_pred = 1\n self.diff_lags = max(self.model.stationarity_lags) if self.model.lag_target and self.model.make_stationary=='difference' else 0\n # Set property depending on whether the current sample will be included as an input, or if we only use lag observations for predictions\n self.first_pred_modifier = 1 if self.model.current_sample_as_input else 0 \n\n # Check that the input data includes history to meet any lag calculation requirements\n if self.model.lags:\n # An additional lag observation is needed if previous targets are being added to the features\n self.rows_per_pred = self.model.lags+self.first_pred_modifier+1 if self.model.lag_target else self.model.lags+self.first_pred_modifier\n # If the target is being lagged and made stationary through differencing additional lag periods are required\n if self.model.lag_target and self.model.make_stationary=='difference':\n extra_msg = \" plus an additional {} periods for making the target stationary using differencing\".format(self.diff_lags)\n # For multi-step predictions we only expect lag values, not the current period's values\n # self.rows_per_pred = self.rows_per_pred-1 if prediction_periods > 1 else self.rows_per_pred\n assert len(X) >= self.rows_per_pred + self.diff_lags, \"Insufficient input data as the model requires {} lag periods for each prediction\".format(self.rows_per_pred) + extra_msg\n\n if variant != 'internal':\n # Prepare the response DataFrame\n # Initially set up with the 'model_name' and 'key' columns and the same index as request_df\n self.response = self.request_df.drop(columns=['n_features'])\n \n # Set up a list to contain predictions and probabilities if required\n predictions = []\n get_proba = False\n if variant == 'predict_proba':\n get_proba = True\n probabilities = [] \n\n # Refresh the keras model to avoid tensorflow errors\n if self.model.using_keras:\n self._keras_refresh()\n\n if prediction_periods > 1:\n if not self.model.lag_target:\n y = None\n\n # Check that we can generate 1 or more predictions of prediction_periods each\n n_samples = len(X)\n assert (n_samples - self.rows_per_pred) >= prediction_periods, \\\n \"Cannot generate predictions for {} periods with {} rows, with {} rows required for lag observations. You may need to provide more historical data or sufficient placeholder rows for future periods.\"\\\n .format(prediction_periods, n_samples, self.rows_per_pred)\n \n # For multi-step predictions we can add lag observations up front as we only use actual values\n # i.e. 
We don't use predicted y values for further predictions \n if self.model.lags or self.model.lag_target:\n X = self._add_lags(X, y=y, extrapolate=self.first_pred_modifier) \n\n # We start generating predictions from the first row as lags will already have been added to each sample\n start = 0\n else:\n # We start generating predictions from the point where we will have sufficient lag observations\n start = self.rows_per_pred\n \n if self.model.lag_target or prediction_periods > 1:\n # Get the predictions by walking forward over the data\n for i in range(start, len(X) + self.first_pred_modifier, prediction_periods): \n # For multi-step predictions we take in self.rows_per_pred rows of X to generate predictions for prediction_periods\n if prediction_periods > 1:\n batch_X = X.iloc[[i]]\n \n if not get_proba:\n # Get the prediction. \n pred = self.model.pipe.predict(batch_X)\n # Flatten the predictions for multi-step outputs and add to the list\n pred = pred.ravel().tolist()\n predictions += pred\n else:\n # Get the predicted probability for each sample \n proba = self.model.pipe.predict_proba(batch_X)\n proba = proba.reshape(-1, len(self.model.pipe.named_steps['estimator'].classes_))\n probabilities += proba.tolist()\n # For walk forward predictions with lag targets we use each prediction as input to the next prediction, with X values avaialble for future periods.\n else:\n batch_X = X.iloc[i-self.rows_per_pred : i] \n # Add lag observations\n batch_y = y.iloc[i-self.rows_per_pred : i]\n batch_X = self._add_lags(batch_X, y=batch_y, extrapolate=self.first_pred_modifier)\n\n # Get the prediction. We only get a prediction for the last sample in the batch, the remaining samples only being used to add lags.\n pred = self.model.pipe.predict(batch_X.iloc[[-1],:])\n\n # Add the prediction to the list. 
\n predictions.append(pred)\n \n # Add the prediction to y to be used as a lag target for the next prediction\n y.iloc[i - self.first_pred_modifier, 0] = pred\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities.append(self.model.pipe.predict_proba(batch_X.iloc[[-1],:]))\n else:\n # Add lag observations to the samples if required\n if self.model.lags:\n X = self._add_lags(X, extrapolate=self.first_pred_modifier)\n\n # Get prediction for X\n predictions = self.model.pipe.predict(X)\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities = self.model.pipe.predict_proba(X)\n \n # Set the number of placeholders needed in the response\n # These are samples for which predictions were not generated due to insufficient lag periods or for meeting multi-step prediction period requirements\n self.placeholders = self.rows_per_pred + self.diff_lags - self.first_pred_modifier\n\n # Transform probabilities to a readable string\n if get_proba:\n # Add the required number of placeholders at the start of the response list\n y = [\"\\x00\"] * self.placeholders\n \n # Truncate multi-step predictions if the (number of samples - self.rows_per_pred) is not a multiple of prediction_periods\n if prediction_periods > 1 and ((n_samples-self.rows_per_pred) % prediction_periods) > 0: \n probabilities = probabilities[:-len(probabilities)+(n_samples-self.rows_per_pred)]\n \n for a in probabilities:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i += 1\n y.append(s[2:])\n\n # Prepare predictions\n else:\n if prediction_periods > 1:\n # Set the value to use for nulls\n null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n\n # Truncate multi-step predictions if the (number of samples - self.placeholders) is not a multiple of prediction_periods\n if (n_samples-self.rows_per_pred) % prediction_periods > 0:\n predictions = predictions[:-len(predictions)+(n_samples-self.rows_per_pred)]\n\n # Add null values at the start of the response list to match the cardinality of the input from Qlik\n y = np.array(([null] * (self.rows_per_pred - self.first_pred_modifier)) + predictions)\n elif self.model.lag_target: \n # Remove actual values for which we did not generate predictions due to insufficient lags\n if is_numeric_dtype(y.iloc[:, 0].dtype):\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = np.NaN\n else:\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = \"\\x00\"\n # Flatten y to the expected 1D shape\n y = y.values.ravel()\n else:\n y = np.array(predictions)\n \n # Inverse transformations on the targets if required \n if variant != 'internal' and (self.model.scale_target or self.model.make_stationary):\n # Take out placeholder values before inverse transform of targets\n null_values = y[:self.rows_per_pred - self.first_pred_modifier] if prediction_periods > 1 or self.model.lag_target else []\n # Add placeholders for samples removed during differencing\n if self.model.make_stationary=='difference':\n null_values = np.append(null_values, np.repeat(null_values[0], self.diff_lags))\n y = y if len(null_values) == 0 else y[-len(predictions):]\n # Add untransformed lag values for differencing if required\n end = self.placeholders\n start = end - self.diff_lags\n y = y if y_orig is None else np.append(y_orig[start : end], y)\n\n # Apply the transformer to the test targets\n y 
= self.model.target_transformer.inverse_transform(y) \n\n # Remove lags used for making the series stationary in case of differencing\n if self.model.make_stationary == 'difference':\n y = y[self.diff_lags:]\n\n # Replace lags used for making the series stationary with nulls in case of differencing\n # if self.model.make_stationary == 'difference':\n #null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n # y = np.append(np.array([null]*self.diff_lags), y[self.diff_lags:])\n \n # Add back the placeholders for lag values\n if len(null_values) > 0:\n y = np.append(null_values, y)\n \n if variant == 'internal':\n return y\n\n # Add predictions / probabilities to the response\n self.response['result'] = y\n\n # Reindex the response to reset to the original sort order\n self.response = self.response.reindex(self.original_index)\n \n if load_script:\n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def predict_single(self, data, version='default'):\n return self.skil.api.transformincrementalimage(\n deployment_name=self.deployment.name,\n image_transform_name=self.model_name,\n version_name=version,\n file=data\n )", "def test_predict_from_example_stream():\n stream = ExampleStream('vw.stream.txt')\n examples = []\n for i in xrange(len(DATA)):\n (value, all_sections) = DATA[i]\n ex = VowpalExample(i, value)\n for (namespace, section) in all_sections.items():\n ex.add_section(namespace, section)\n stream.add_example(ex)\n train = examples[:-2]\n test = examples[-2:]\n vw = Vowpal(PATH_VW, './vw.%s', {'--passes' : '10' })\n preds = vw.predict_from_example_stream(stream)\n for (id, value) in preds:\n print 'prediction for %s is %s' % (id, value)", "async def predict(events):\n global model, model_version\n\n async for event in events:\n live_version = model_table['live_version']\n\n # check stream processors model version against live version in shared table\n if model_version != live_version:\n # load in new model if out of sync\n print(f\"Loading new model {live_version}\")\n # model is locally saved pickled python code\n # but more realistically this would be s3 and a much smarter rehydrating strategy\n model_location = model_table['model_location']\n model = pickle.load(open(model_location, \"rb\"))\n model_version = live_version\n\n result = model(event)\n print(f\"\\nEvent: {event}\\nPrediction: {result}\")", "def retrain(datapath, model_version):\n df = get_df(datapath)\n X = df.drop(columns='target')\n y = df['target']\n fitted_model = fit(RF, X, y)\n\n with 
open(f'trained_models/model_{model_version}.joblib', 'wb') as file:\n joblib.dump(fitted_model, file)", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def get_initial_predictions(tuner, input_data, output_path, model_save_name):\n\n best_model = tuner.best_estimator()\n batch_job = best_model.transformer(1, \"ml.m5.large\", output_path=output_path.as_uri(),\n model_name=model_save_name)\n batch_job.transform(input_data.as_uri())\n # TODO: Do an ls first so we can get any/all files\n output_file = output_path / 'validation.csv.out'\n with smart.open(output_file.as_uri(), 'r', transport_params={'session': boto_session}) as f:\n predictions = pd.read_csv(f, header=None)\n return predictions", "def predict(self, data, version='default'):\n return self.skil.api.transformarray(\n deployment_name=self.deployment.name,\n transform_name=self.model_name,\n version_name=version,\n batch_record=data\n )", "def main(version: str, data_root: str,\n split_name: str, output_dir: str, config_name: str = 'predict_2020_icra.json') -> None:\n\n print('Dataset dir:', data_root)\n nusc = NuScenes(version=version, dataroot=data_root)\n helper = PredictHelper(nusc)\n dataset = get_prediction_challenge_split(split_name, data_root)\n config = load_prediction_config(helper, config_name)\n\n oracle = PhysicsOracle(config.seconds, helper)\n cv_heading = ConstantVelocityHeading(config.seconds, helper)\n covernet = CoverNetBaseline(config.seconds, helper)\n\n cv_preds = []\n oracle_preds = []\n covernet_preds = []\n for idx, token in enumerate(tqdm(dataset)):\n # if idx > 20:\n # break\n\n cv_preds.append(cv_heading(token).serialize())\n oracle_preds.append(oracle(token).serialize())\n covernet_preds.append(covernet(token).serialize()) # The slowest one, by far\n\n json.dump(cv_preds, open(os.path.join(output_dir, \"cv_preds.json\"), \"w\"))\n json.dump(oracle_preds, open(os.path.join(output_dir, \"oracle_preds.json\"), \"w\"))\n json.dump(covernet_preds, open(os.path.join(output_dir, \"covernet_preds.json\"), \"w\"))", "def test_model_by_version_get(self):\n\n # Firstly, find existing version - latest\n response = self.client().get('/model')\n latest_model = Model.from_json(response.data.decode())\n latest_version = latest_model.version\n\n # Accesses latest model\n response = self.client().get('/models/'+str(latest_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, latest_version)\n self.assertEqual(loaded_model, latest_model)\n\n # Accesses random model version\n random_version = random.choice(list(self.data_manipulation.versions.keys()))\n random_model = self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, random_version)\n self.assertEqual(loaded_model, random_model)\n\n # Random version is removed\n del self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 404)", "def get_predictions(fitted_model_filename):\n click.echo(\"Mode: predicting probabilities.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n 
fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # boot_data = bootstrap(new_options, mode=\"internal_test\")\n # model = boot_data['model']\n #\n # X_test_int, y_test_int = boot_data['data']\n # internal_test_proba = model.predict_proba(X_test_int)\n # internal_test_proba = np.c_[y_test_int, internal_test_proba[:, 1]]\n\n boot_data = bootstrap(new_options, mode=\"external_test\")\n model = boot_data['model']\n X_test_ext, y_test_ext = boot_data['data']\n\n # fit scaler on train data and transform test data\n scaler = StandardScaler()\n X_train, y_train = load_data(defaults, which='train')\n\n numeric_cols = X_train.select_dtypes(include=np.float64).columns.tolist()\n scaler.fit(X_train[numeric_cols])\n X_test_ext.loc[:, numeric_cols] = scaler.transform(X_test_ext[numeric_cols])\n\n external_test_proba = model.predict_proba(X_test_ext)\n external_test_proba = np.c_[y_test_ext, external_test_proba[:, 1]]\n\n # internal_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"internal_test_preds.csv\")\n external_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH,\n f\"external_test_preds_{fitted_model_filename.replace('.pkl', '')}.csv\")\n # pd.DataFrame(internal_test_proba, columns=['target', 'proba']).to_csv(internal_test_results_path, index=False)\n pd.DataFrame(external_test_proba, columns=['target', 'proba']).to_csv(external_test_results_path, index=False)", "def load_predict_byname(filename, path=MODEL_PATH):\n full_path = os.path.join(path, filename)\n logging.info(\"trying to load {}\".format(full_path))\n return np.load(os.path.join(path, filename))['pred']", "def predict(self, load_script=False, variant=\"predict\"):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData']\n col_headers = ['model_name', 'n_features']\n feature_col_num = 1\n \n # An additional key field column is expected if the call is made through the load script\n if load_script:\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'key', 'n_features']\n feature_col_num = 2\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n \n if load_script:\n # Set the key column as the index\n self.request_df.set_index(\"key\", drop=False, inplace=True)\n \n try:\n # Split the features provided as a string into individual columns\n self.X = pd.DataFrame([x[feature_col_num].split(\"|\") for x in self.request_df.values.tolist()],\\\n columns=self.model.features_df.loc[:,\"name\"].tolist(),\\\n index=self.request_df.index)\n except AssertionError as ae:\n err = \"The number of input columns do not match feature definitions. 
Ensure you are using the | delimiter and that the target is not included in your input to the prediction function.\"\n raise AssertionError(err) from ae\n \n # Convert the data types based on feature definitions \n self.X = utils.convert_types(self.X, self.model.features_df, sort=False)\n\n if variant in ('predict_proba', 'predict_log_proba'):\n # If probabilities need to be returned\n if variant == 'predict_proba':\n # Get the predicted probability for each sample \n self.y = self.model.pipe.predict_proba(self.X)\n elif variant == 'predict_log_proba':\n # Get the log probability for each sample\n self.y = self.model.pipe.predict_log_proba(self.X)\n \n # Prepare a list of probability by class for each sample\n probabilities = []\n\n for a in self.y:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i = i + 1\n probabilities.append(s[2:])\n \n self.y = probabilities\n \n else:\n # Predict y for X using the previously fit pipeline\n self.y = self.model.pipe.predict(self.X)\n\n # Inverse transformations on the targets if required\n if self.model.scale_target or self.model.make_stationary:\n # Apply the transformer to the test targets\n self.y = self.model.target_transformer.inverse_transform(self.y) \n\n # Prepare the response\n self.response = pd.DataFrame(self.y, columns=[\"result\"], index=self.X.index)\n \n if load_script:\n # Add the key field column to the response\n self.response = self.request_df.join(self.response).drop(['n_features'], axis=1)\n \n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def predict_json(project, model, instances, version=None):\n\n service = googleapiclient.discovery.build('ml', 'v1')\n name = 'projects/{}/models/{}'.format(project, model)\n\n if version is not None:\n name += '/versions/{}'.format(version)\n\n response = service.projects().predict(\n name=name,\n body={'instances': instances}\n ).execute()\n\n if 'error' in response:\n raise RuntimeError(response['error'])\n\n return response['predictions']", "def load_dataset(name, version):\n dataset_dir = os.path.join(DATA_DIR, name)\n dataset_ver_dir = os.path.join(dataset_dir, version)\n\n if not os.path.isdir(dataset_dir):\n raise FileNotFoundError(\"Dataset dir not found\")\n if not os.path.isdir(dataset_ver_dir):\n raise FileNotFoundError(\"Dataset version dir not found\")\n\n train_data = load_kg_file(os.path.join(dataset_ver_dir, \"train.txt.gz\"))\n valid_data = load_kg_file(os.path.join(dataset_ver_dir, \"valid.txt.gz\"))\n test_data = load_kg_file(os.path.join(dataset_ver_dir, \"test.txt.gz\"))\n\n dataset = KgDataset()\n dataset.load_triples(train_data, tag=\"train\")\n 
dataset.load_triples(valid_data, tag=\"valid\")\n dataset.load_triples(test_data, tag=\"test\")\n return dataset", "def predict_api():\n pass" ]
[ "0.6604535", "0.62928057", "0.62380624", "0.6178517", "0.6116927", "0.6114995", "0.6087761", "0.6059424", "0.6027648", "0.5998679", "0.5942684", "0.59116244", "0.5828584", "0.581421", "0.58066016", "0.57952", "0.57886976", "0.5787052", "0.5781026", "0.5778934", "0.5777145", "0.57666916", "0.57407844", "0.573099", "0.5720386", "0.5718783", "0.5716721", "0.5715909", "0.5702798", "0.5699744" ]
0.70246345
0
Retrieve the HTML from the website at `url`.
def get_html(url): return urllib.request.urlopen(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_html_from_url(url):\n request = requests.get(url)\n data = request.text\n return data", "def get_html(url):\n req = urllib.request.Request(\n url,\n headers={\n 'User-Agent': 'Python Learning Program',\n 'From': '[email protected]'\n }\n )\n resp = urllib.request.urlopen(req)\n\n if resp.code == 200:\n return resp.read() # returns the html document\n else:\n return None", "def getHtml(url):\n return urlopen(url)", "def retrieve_html(url):\n req = urllib2.Request(url)\n req.add_header('User-Agent', 'Just-Crawling 0.1')\n request = None\n status = 0\n try:\n logger.info(\"Crawling %s\" % url)\n request = urllib2.urlopen(req)\n except urllib2.URLError as e:\n logger.error(\"Exception at url: %s\\n%s\" % (url, e))\n except urllib2.HTTPError as e:\n status = e.code\n except:\n return\n if status == 0:\n status = 200\n\n try:\n data = request.read()\n except:\n return\n\n return str(data)", "def get_html(url):\n\n r = requests.get(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n })\n html = r.text\n\n return html", "def get_html(url: str) -> str:\n headers = {\n 'User-Agent': Config.Scraper.user_agent,\n }\n logging.debug('User-Agent: ' + headers['User-Agent'])\n r = requests.get(url.strip(), headers=headers)\n r.encoding = 'utf8'\n print('[Status Code: %s]' % r.status_code)\n if r.status_code != 200:\n raise Exception('Error in get HTML!')\n return r.text", "def getHTML(url): \n return urlopen(url)", "def get_html(url):\n response = requests.get(url)\n response.encoding = 'utf-8'\n return response.text", "def getHtml(url):\n log.finer(\" Opening URL: %s\" % url)\n handle = MozURLopener().open(url)\n html = handle.read()\n handle.close()\n return html", "def getHtml(_url):\n try:\n logger.info('getHtml: Requesting: %s' % _url)\n\n response = urllib2.urlopen(_url)\n\n #download data\n html_ = response.read()\n logger.debug('getHtml: Retrieved data: %s' % html_)\n\n return html_\n\n except urllib2.HTTPError, e:\n logger.error('getHtml: HTTPError: ' + str(e.code))\n\n except urllib2.URLError, e:\n logger.error('getHtml: URLError: ' + str(e.reason))\n\n except httplib.HTTPException, e:\n logger.error('getHtml: HTTPException: ', str(e))\n\n except Exception:\n logger.exception('getHtml: Unhandled exception: ')", "def scrape_url(url):\n html = requests.get(url).text\n return scrape_html(html)", "def download_html(url: str):\n response = urllib.request.urlopen(url)\n return response.read()", "def fetchUrl(self, url):\n self.driver.get(url)\n html = self.driver.page_source\n return html", "def getHtml(self, url):\n r = requests.get(url)\n html = r.content\n return html", "def get_html(url):\n print('fetching', url)\n try:\n re = requests.get(url, timeout=1, stream=True)\n print('success!')\n # limit file size to 1mb\n html = re.raw.read(1000000+1, decode_content=True)\n if len(html) > 1000000:\n raise ValueError('response too large')\n return html\n except:\n raise TimeoutError('request timed out')", "def extract_page_html(url):\n\n from urllib.request import Request, urlopen\n\n request_headers = {'User-Agent': 'Mozilla/5.0'}\n req = Request(url, headers=request_headers)\n page = urlopen(req).read()\n\n return page", "def get_page(self, url):\n \"\"\" @param url: Url we want to crawl\"\"\"\n \"\"\" @type url: String \"\"\"\n \"\"\"@return the page\"\"\"\n try:\n u = urlopen(url)\n html = u.read().decode('utf-8')\n # except Exception as e:\n # logging.exception(e)\n finally:\n 
print(\"Closing\")\n u.close()\n return html", "def getHTML(url):\n\n time.sleep(2.00)\n html = urllib2.urlopen(url,timeout=10).read()\n urllib2.urlopen(url).close()\n\n soup = BeautifulSoup(html)\n\n return soup", "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def get_html(url):\n try:\n response = requests.get(url)\n except requests.exceptions.ConnectionError as e:\n print \"Site %s isn't accessibility\" % BASE_URL\n except requests.exceptions.ReadTimeout as e:\n print \"Error: Read Timeout\"\n except requests.exceptions.HTTPError as e:\n print \"Get an HTTPError:\", e.message\n return response.text", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def get_html(url):\n # time.sleep(float(random.randint(1, 500)) / 100)\n requests.adapters.DEFAULT_RETRIES = 5\n headers = {\n 'Content-Type': \"application/json;charset=uf8\"\n }\n\n try:\n response = requests.get(url, headers=headers, stream=False, timeout=10)\n except Exception as e:\n print(e)\n print('html连接异常')\n return 'html_err'\n\n s = requests.session()\n s.keep_alive = False\n response.close()\n\n if response.status_code == 200:\n print(f'{url}\\n页面请求成功')\n response.encoding = 'utf8'\n # print(type(response)) # <class 'requests.models.Response'>\n return response.text # 输出网页文本\n # return response.json() # 输入的地址内容是json\n # return response.content # 输入的地址内容是文件,比如图片、视频\n else:\n print('请求网页源代码错误, 错误状态码:', response.status_code)\n return response.status_code", "def get_page(url):\n request = Request(url)\n request.add_header('User-Agent',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)')\n cookie_jar.add_cookie_header(request)\n response = urlopen(request)\n cookie_jar.extract_cookies(response, request)\n html = response.read()\n response.close()\n cookie_jar.save()\n return html", "def getSoup(url):\n return BeautifulSoup(getHtml(url), 'lxml')", "def get_page_html(url: str) -> Union[int, str]:\n req = requests.get(url=url)\n if req.status_code == 200:\n return req.text\n raise requests.exceptions.RequestException('')", "def get_html(website_url):\n\n website_response = requests.get(website_url, headers=headers_req)\n if website_response.status_code != requests.codes.ok:\n raise SiteUnreachableException()\n return BeautifulSoup(website_response.content, 'html.parser')", "def get_html_content(self, url):\n\n req = urllib2.Request(url, headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def get_webpage_content(url):\n request = urllib2.Request(url)\n page = urllib2.urlopen(request)\n soup = BeautifulSoup(page.read())\n return unicode(soup)" ]
[ "0.8416072", "0.8373522", "0.8318817", "0.82229555", "0.8126572", "0.8060912", "0.79980475", "0.79878724", "0.7985769", "0.7910303", "0.7863441", "0.78517896", "0.78509027", "0.78275955", "0.77782696", "0.77640617", "0.7740524", "0.7684471", "0.76605576", "0.7652579", "0.7646284", "0.76087666", "0.7604561", "0.7583465", "0.75759715", "0.7528971", "0.7514273", "0.74873275", "0.7480644", "0.7453803" ]
0.8443978
0
Get the HTML of online clubs with Penn.
def get_clubs_html(): url = 'https://ocwp.apps.pennlabs.org' return get_html(url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_courses_html():\r\n r = requests.get(URL_CS_ALL_REQ)\r\n if r.status_code == 200:\r\n return r.text\r\n else:\r\n return None", "def test_get_html(self):\r\n _html = self.peer_grading.get_html()", "def show_clubs(self):\n self.client.get(f\"{host}/board\")", "def get(code) -> Sauce:\n url = 'https://nhentai.net/g/%s/' % code\n doc = get_document(url)\n info = doc.xpath('//div[@id=\"info\"]')[0]\n\n #title = info[0].text\n #pages = int( info[3].text.split()[0] )\n title = info.xpath('./h1/text()')[0]\n pages = int( info.xpath('./div[contains(text(), \"pages\")]')[0].text.split()[0] )\n \n tags = [ tag.text[:-1] for tag in info.xpath('.//div[contains(text(), \"Tags:\")]')[0].getchildren()[0] ]\n artists = [ tag.text[:-1] for tag in info.xpath('.//div[contains(text(), \"Artists:\")]')[0].getchildren()[0] ]\n\n # get the gallery codes by checking the thumbnails\n # this returns <a> which contains <img>\n image_urls = [ get_image_url(a[0]) for a in doc.xpath('//a[@class=\"gallerythumb\"]') ]\n\n return Sauce(title, pages, tags, artists, url, image_urls)", "def get_club_info(url):\n\tbase_url = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\tnum = 0\n\n\tclub_url = base_url + url\n\n\tcategoryArr= []\n\n\tr = rq.get(club_url)\n\tsoup = BeautifulSoup(r.text)\n\tinfoClub = [ '' for i in range(9) ]\n\t#0: clubid\n\tclubid = url.split(\"=\")[-1]\n\tinfoClub[0] = clubid\n\t#1: clubname\n\tinfoClub[1] = soup.find(\"h2\").text\n\t \n\t# info = soup.p.get_text()\n\tinfo = soup.p.get_text().encode('ascii','ignore')\n\t#2: club description\n\tinfoClub[2] = info\n\n\tstuff = soup.ul\n\n\tstuffArray =[]\n\n\tstuffArray.append(stuff.li)\n\n\tcount = 0\n\tfor more in stuff.li.next_siblings:\n\t if (count%2 == 1):\n\t stuffArray.append(more)\n\t count +=1\n\n\t#info New: categories do .a trick\n\n\tcatRaw = BeautifulSoup(str(stuffArray[0]))\n\tcats = catRaw.find_all('a')\n\n\tfor cat in cats:\n\t catStr = []\n\t tempCat = str(cat.get('href'))\n\t catStr.append(clubid)\n\t catStr.append(tempCat[18:])\n\t categoryArr.append(catStr)\n\n\t#info 3: number of members\n\tmemStr = (str(stuffArray[1]))[49:-10]\n\n\t# print memStr\n\tif memStr == '1-9':\n\t memStr = 0\n\telif memStr == '10-25':\n\t memStr = 1\n\telif memStr == '26-50':\n\t memStr = 2\n\telif memStr == '76-100':\n\t memStr =3\n\telse:\n\t memStr = 4\n\t# print memStr\n\n\tinfoClub[3] = str(memStr)\n\n\t#inf 4: involvement\n\tinvolvementStr = str(stuffArray[2])\n\tinfoClub[4] = involvementStr[43:-10]\n\n\t#info 5: group email\n\temailRaw = BeautifulSoup(str(stuffArray[3]))\n\temail = emailRaw.a.get('href')\n\tinfoClub[5] = str(email)\n\n\t#info 6: group website\n\twebRaw = BeautifulSoup(str(stuffArray[4]))\n\tweb = webRaw.a.get('href')\n\tinfoClub[6] = str(web)\n\n\t#info 7: Mailing address\n\tmailingRaw = BeautifulSoup(str(stuffArray[5]))\n\tmail = mailingRaw.ul\n\n\tmailStr = (str(mail.li))[4:-5] + ','\n\n\tcheck = 0\n\tfor line in mail.li.next_siblings:\n\t check +=1\n\t if (check % 2 == 0):\n\t mailStr += (str(line))[4:-5]+ ','\n\n\tmailStr = mailStr[:-1]\n\tif (num != 204):\n\t mailStr.encode('ascii','ignore')\n\n\t if len(mailStr) > 255:\n\t print 'Error: mailing address too long'\n\n\t infoClub[7] = mailStr\n\telse:\n\t infoClubs[7] = \"hardcode\"\n\n\n\t#info 8: month of election\n\tstring1 = str(stuffArray[6])\n\tinfoClub[8] = string1[58:-10]\n\t\n\tprint \"Got all info of\", infoClub[0], infoClub[1]\n\n\treturn infoClub, categoryArr", "def biological_science_news():\n\n return 
general_scraper(['http://mesva.univaq.it/?q=avvisi/cl-clm/52672'])", "def retrieve_episode_html(url):\n response = requests.get(url)\n return response.content", "def get_challenge_html(self):\r\n\r\n context = {\r\n 'top_scores': self.puzzle_leaders()}\r\n\r\n return self.system.render_template('folditchallenge.html', context)", "def get_club_description(club):\n elts = get_elements_with_class(club, 'em', '')\n if len(elts) < 1:\n return ''\n return elts[0].text\n\n return ''", "def get_html_document(self, team, src_type, season=None):\n if src_type == 'roster':\n # preparing url to team's roster page\n team_url_component = team.team_name.lower().replace(\" \", \"\")\n # creating url like 'https://www.nhl.com/ducks/roster'\n if season is not None:\n team_url = \"/\".join((\n self.NHL_SITE_PREFIX,\n team_url_component,\n self.NHL_SITE_ROSTER_SUFFIX,\n str(season)))\n else:\n team_url = \"/\".join((\n self.NHL_SITE_PREFIX,\n team_url_component,\n self.NHL_SITE_ROSTER_SUFFIX))\n elif src_type == 'system':\n # preparing url to team's prospects page\n team_url_component = team.team_name.lower().replace(\" \", \"\")\n team_site_prefix = self.TEAM_SITE_PREFIX.replace(\n \"%s\", team_url_component)\n # creating url like\n # 'http://ducks.ice.nhl.com/club/roster.htm?type=prospect'\n team_url = \"\".join((\n team_site_prefix,\n self.TEAM_SITE_ROSTER_SUFFIX))\n elif src_type == 'contracts':\n # preparing url to team's prospects page\n team_url_component = team.team_name.lower().replace(\" \", \"\")\n team_url = \"\".join((\n self.CAPFRIENDLY_SITE_PREFIX, team_url_component))\n\n try:\n req = requests.get(team_url)\n except requests.exceptions.ConnectionError:\n # TODO: returning empty document tree\n return None\n return html.fromstring(req.text)", "def create_full_pitcher_html(url):\n # raw_pitcher_list = scrape_razzball_pitchers(url)\n raw_pitcher_list = fantasy_pro_players(url)\n return create_full_pitcher(raw_pitcher_list)", "def GET(self):\n web.header(\"Content-Type\",\"text/html; charset=utf-8\")\n\n data = web.input(module=\"module\", start=\"start\", num=\"num\")\n module = data[\"module\"]\n start = data[\"start\"]\n num = data[\"num\"]\n\n module = (1 if module == \"module\" else module)\n start = (1 if start == \"start\" else start)\n num = (1 if num == \"num\" else num)\n\n news = api.get_news_fromDB(int(module), int(start), int(num))\n\n #return news[0][\"maindiv\"]\n html = \"\"\n for item in news:\n html = html + item[\"maindiv\"]\n return html", "def get_course_page(self):\n\n print(\"Course URL: {}\".format(self.course_url))\n try:\n self.course_page = BeautifulSoup(requests.get(self.course_url).text, \"lxml\")\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"Oops: Something Else\", err)\n sys.exit(1)", "def get_all_clubs():\n\turl = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\n\tr = rq.get(url)\n\tsoup = BeautifulSoup(r.text)\n\tlinks = soup.find_all('a')\n\n\tlinkArray = []\n\tnameArray = []\n\n\tfor link in links:\n\t\tl = link.get('href')\n\t\tlinkArray.append(l)\n\t\tname = link.get_text()\n\t\tname = name.encode('ascii','ignore')\n\t\tnameArray.append(name)\n\n\treturn 
nameArray, linkArray", "def get_clubs(soup):\n\n return get_elements_with_class(soup, 'div', 'box')", "def get_university_news():\n\tresponse = requests.get('https://cumoodle.coventry.ac.uk')\n\tmoodleContent = BeautifulSoup(response.content, 'html.parser')\n\tpostLinks =[]\n\theadings = []\n\tdates = []\n\tdata = \"\"\n\tfor title in moodleContent.findAll('div',{'class':'subject'}):\n\t\theadings.append(title.text+\"</a></p>\")\n\tfor link in moodleContent.findAll('div',{'class':'link'}):\n\t\tpostLinks.append(\"<p style = 'font-size:120%;'> <a href = '\"+link.a['href']+\"'>\") \n\tfor date in moodleContent.findAll('div',{'class':'author'}):\n\t\tdates.append(\"<p style='font-size:90%;'>\"+date.text[18:]+\"</p>\")\n\tresults = zip(postLinks, headings, dates)\n\tfor result in results:\n\t\tdata+=(''.join(result))\n\treturn data", "def get_song_html(self, url):\n request = urllib.request.Request(url)\n request.add_header(\"Authorization\", \"Bearer \" + self.client_access_token)\n request.add_header(\"User-Agent\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'\")\n\n page = urllib.request.urlopen(request)\n html = BeautifulSoup(page, \"lxml\")\n print(\"Scraped: \" + url)\n return html", "def test_get_urls():\n\n year = \"2018\"\n week = \"1\"\n\n assert ff_team.get_game_urls(year, week).get(\n 'Atlanta Falcons') == \"https://www.pro-football-reference.com/boxscores/201809060phi.htm\"", "def get_html():\n url = 'https://честныйзнак.рф/vopros-otvet/'\n session = requests.Session()\n response = session.get(url)\n page = response.text\n soup = BeautifulSoup(page, 'html.parser')\n return soup", "def scrape_layer(self):\n\t\t# azlyrics structure\n\t\ttry:\n\t\t\t#page = urlopen('http://www.azlyrics.com/lyrics/edsheeran/theateam.html')\n\t\t\tpage = urlopen('http://www.azlyrics.com/lyrics/edsheeran/theteam.html')\n\t\t\tsoup = BeautifulSoup(page.read(), \"lxml\")\n\t\t\tcontent_div = soup_az.find(\"div\", class_=\"col-xs-12 col-lg-8 text-center\")\n\t\t\tlyric_div = content_div.find(\"div\", class_=None)\n\t\t\t# change line break tag to dots\n\t\t\tprocessed = re.sub('<br\\s*?>', '.', lyric_div.text)\n\t\t\tlines = processed.split()\n\t\t\treturn lines\n\t\texcept URLError: # if no url or other connection errors, skip\n\t\t\tprint(\"no url\")\n\t\t\tpass\n\n\t\t# MetroLyrics structure\n\t\ttry:\n\t\t\t#page = urlopen('http://www.metrolyrics.com/a-team-lyrics-ed-sheeran.html')\n\t\t\tpage = urlopen('http://www.metrolyrics.com/aheeran.html')\n\t\t\tsoup = BeautifulSoup(page.read(), \"lxml\")\n\t\t\tlyrics = soup.find(\"div\", {\"id\": \"lyrics-body-text\"})\n\t\t\tprocessed = re.sub('\\n', '. 
', lyrics.text)\n\t\t\tlines = processed.split()\n\t\t\treturn lines\n\t\texcept URLError: # if no url or other connection errors, skip\n\t\t\tprint(\"no url\")\n\t\t\tpass\n\n\t\t# Genius Lyrics structure\n\t\ttry:\n\t\t\t#page = urlopen('https://genius.com/Ed-sheeran-the-a-team-lyrics')\n\t\t\turl = 'https://genius.com/Ed-sheeran-the-a-team-lyrics'\n\t\t\tclient = webdriver.PhantomJS()\n\t\t\tclient.get(url)\n\t\t\tsoup = BeautifulSoup(client.page_source, 'lxml')\n\t\t\tlyrics_div = soup.find(\"div\", class_=\"lyrics\")\n\t\t\tlyrics_content = lyrics_div.find(\"p\")\n\t\t\t#print(lyrics_content)\n\t\t\tlyrics_a = lyrics_content.find_all(\"a\")\n\t\t\tlines = []\n\t\t\tfor obj in lyrics_a:\n\t\t\t\tfor c in obj.contents:\n\t\t\t\t\tif len(c) > 0: lines.append(c + '.')\t\t\t\n\t\t\treturn lines\n\t\t\t#lyrics_list.append(c)\n\t\t\t#print(lyrics_list)\n\t\texcept URLError:\n\t\t\tprint(\"no url\")\n\t\t\tpass\n\n\t\t# Song Lyrics structure IMPLEMENT THIS -------------------------------------------\n\t\ttry:\n\t\t\tpass\n\t\texcept URLError:\n\t\t\tprint(\"no url\")", "def environmental_science_news():\n\n return general_scraper(['http://mesva.univaq.it/?q=avvisi/cl-clm/52671'])", "async def mcwiki(self, ctx, *, query):\n url = f\"https://minecraft.gamepedia.com/index.php?search={query}\"\n response = requests.get(url)\n content = response.content.decode('utf-8')\n await ctx.send(response.url)", "def getHtml(self, url):\n r = requests.get(url)\n html = r.content\n return html", "def club_info(self, cid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/clubInfo.phtml?cid=' + cid, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n plist = list()\r\n for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n plist.append('%s\\t%s\\t%s\\t%s\\t%s' % (\r\n i.find_all('td')[0].text, i.find_all('td')[1].text, i.find_all('td')[2].text, i.find_all('td')[3].text,\r\n i.find_all('td')[4].text))\r\n return soup.title.text, plist", "def get_coursepage(code):\n url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code\n print url\n coursepage = requests.get(url)\n return coursepage", "def _docs():\n url = \"https://vanheeringen-lab.github.io/seq2science\"\n if not webbrowser.open(url):\n print(url)", "def _run_wiki_nogui(self):\n # start wikipedia page download\n self._log_print(msg_WHITE=\"Accessing Wikipedia...\")\n\n # download wikipedia page and track progress\n for message in self._get_preload_progress():\n if \"Searching for\" in message:\n print(f\"Searching for: {GREEN}{self.ALBUM}{RESET} by \"\n f\"{GREEN}{self.ALBUMARTIST}\")\n elif \"Using offline\" in message:\n self._log_print(msg_GREEN=\"Using offline cached page insted \"\n \"of web page\")\n elif \"Found at\" in message:\n self._log_print(msg_GREEN=\"Found at: \", msg_WHITE=self.url)\n else:\n self._log_print(msg_WHITE=message)\n\n # get error messages\n error_msg = self.get_wiki()\n if error_msg:\n self._log_print(msg_GREEN=error_msg)\n return\n\n if not we_are_frozen():\n # basic html textout for debug\n self.basic_out()\n\n # find release date\n self._log_print(msg_GREEN=\"Found release date:\",\n msg_WHITE=self.get_release_date())\n\n # find list of genres\n self._log_print(msg_GREEN=\"Found genre(s)\",\n msg_WHITE=\"\\n\".join(self.get_genres()))\n\n # get and print out page contents\n self._log_print(msg_GREEN=\"Found 
page contents\",\n msg_WHITE=\"\\n\".join(self.get_contents()))\n\n # extract track list\n self.get_tracks()\n\n # extract personel names\n self._log_print(msg_GREEN=\"Found aditional personel\")\n self.get_personnel()\n if not we_are_frozen():\n print(self.personnel_2_str())\n\n # extract writers, composers\n self._log_print(msg_GREEN=\"Found composers\",\n msg_WHITE=\"\\n\".join(flatten_set(self.get_composers())))\n\n if not we_are_frozen():\n # save to files\n self._log_print(msg_WHITE=\"Writing to disk\")\n self.disk_write()\n\n # print out found tracklist\n self._log_print(msg_GREEN=\"Found Track list(s)\")\n self.print_tracklist()\n\n # select genre\n if not self.GENRE:\n if not self.genres:\n print(CYAN + \"Input genre:\", end=\"\")\n self.genre = input()\n else:\n print(CYAN + \"Specify which genre you want to write: [1.]\")\n for i, gen in enumerate(self.genres, 1):\n print(f\"{i}. {gen}\")\n\n print(\"Input number:\", CYAN, end=\"\")\n index = input()\n try:\n index = int(index) - 1\n except ValueError:\n index = 0\n\n self.GENRE = self.genres[index]\n\n # decide what to do with artists\n print(CYAN + \"Do you want to assign artists to composers? ([y]/n)\",\n RESET, end=\" \")\n if to_bool(input()):\n self.merge_artist_composers()\n\n # decide if you want to find lyrics\n print(CYAN + \"\\nDo you want to find and save lyrics? ([y]/n): \" +\n RESET, end=\"\")\n\n # download lyrics\n self.save_lyrics(to_bool(input()))\n\n print(CYAN + \"Write data to ID3 tags? ([y]/n): \" + RESET, end=\"\")\n if to_bool(input()):\n if not self.write_tags():\n self._log_print(\n msg_WHITE=\"Cannot write tags because there are no \"\n \"coresponding files\")\n else:\n self._log_print(msg_GREEN=\"Done\")", "def main():\n\n print(\n \"\\nWELCOME!!!HERE YOU CAN GET THE CLUB CAREER INFORMATION\"\n \" REGARDING YOUR FAVOURITE FOOTBALL PLAYER FROM WIKIPEDIA!\"\n )\n while True:\n name = get_player_name()\n\n # Concatenating the Football players name to the wikipedia URL\n # to send a GET request\n req = requests.get(\"https://en.wikipedia.org/wiki/\" + name)\n\n if req.status_code == 200:\n\n # BeautifulSoup object for parsing through the html text\n soup = BeautifulSoup(req.text, \"html.parser\")\n\n # Searching for the content box with css selector '.mw-headline'\n titles = soup.select(\".mw-headline\")\n\n # collecting info regarding club career only\n club_info = get_club_info(titles)\n\n if club_info:\n # For presenting the collected information\n display_info(club_info)\n\n key = (\n input('\\nPress \"y\" to continue or any other key to quit: ')\n .strip()\n .lower()\n )\n if key != \"y\":\n break\n\n else:\n print(\"URL NOT FOUND!!\")\n\n key = (\n input('\\nPress \"y\" to continue or any other key to quit: ')\n .strip()\n .lower()\n )\n if key != \"y\":\n break", "def news_web():\n get_news()", "async def fetch_html(url: str,\n session: aiohttp.ClientSession,\n **kwargs) -> str:\n\n resp = await session.request(method=\"GET\", url=url, **kwargs)\n resp.raise_for_status()\n logger.info(\"Got response [%s] for URL: %s\", resp.status, url)\n html = await resp.text()\n return html" ]
[ "0.6088996", "0.5787258", "0.5756878", "0.5625098", "0.5575333", "0.5504629", "0.54450583", "0.5413097", "0.5376512", "0.53630584", "0.53621113", "0.5361559", "0.5302013", "0.5288246", "0.52757317", "0.524258", "0.5232825", "0.521696", "0.52091354", "0.5189585", "0.5163789", "0.51559156", "0.51417553", "0.5127553", "0.51226646", "0.5107118", "0.51010716", "0.50905687", "0.50776964", "0.5077321" ]
0.7841981
0
Returns a list of elements of type "elt" with the class attribute "cls" in the HTML contained in the soup argument. For example, get_elements_with_class(soup, 'a', 'navbar') will return all links with the class "navbar". Important to know that each element in the list is itself a soup which can be queried with the BeautifulSoup API. It's turtles all the way down!
def get_elements_with_class(soup, elt, cls): return soup.findAll(elt, {'class': cls})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_and_get_all_elements_by_class_name(element, class_name):\r\n if element is None or not class_name:\r\n return []\r\n try:\r\n return element.find_elements_by_class_name(class_name) \r\n except NoSuchElementException:\r\n return []", "def get_tags_with_class(self, class_name: str):\n return self.soup.find_all(attrs={'class': class_name})", "def get_by_cls(self, cls: GObject.GType) -> typ.List[Gst.Element]:\n elements = self._pipeline.iterate_elements()\n if isinstance(elements, Gst.Iterator):\n # Patch \"TypeError: ‘Iterator’ object is not iterable.\"\n # For versions we have to get a python iterable object from Gst iterator\n _elements = []\n while True:\n ret, el = elements.next()\n if ret == Gst.IteratorResult(1): # GST_ITERATOR_OK\n _elements.append(el)\n else:\n break\n elements = _elements\n\n return [e for e in elements if isinstance(e, cls)]", "def find_elements_by_class(self,param={},ignore_error_handle = False):\n message = {};\n step = 'find all elements by class name ' + str(param.get('class',None)) + ' on current page';\n class_name = str(param.get('class',None));\n try:\n elements = self.driver.find_elements(by=By.CLASS_NAME,value=class_name);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def extract_all_tags(final_link, driver):\n\n #driver = webdriver.Chrome(executable_path=\"ChromeDriver/chromedriver.exe\")\n driver.get(str(final_link))\n classes = []\n tags = ['div', 'td', 'li', 'a']\n for tag in tags:\n a = driver.find_elements_by_tag_name(str(tag))\n b = len(a)\n for i in range(b):\n try:\n if a[i].get_attribute(\"class\") == None or a[i].get_attribute(\"class\") == '' or a[i].get_attribute(\"class\") == ' ' or a[i].get_attribute(\"class\") == ' ':\n continue\n else:\n className = a[i].get_attribute(\"class\").strip().split(\" \")\n for classN in className:\n classes.append(str(tag) + '.' 
+ str(classN))\n\n except:\n continue\n\n #driver.quit()\n classes = list(dict.fromkeys(classes))\n return(classes)", "def filter(self, cls):\n return ElementList([x for x in self._elements if isinstance(x, cls)])", "def get_classes(html):\n # elements = html.find_all(\"span\", \"code\")\n # titles = html.find_all(\"span\", \"title\")\n # classes = []\n # for i in range(len(elements)):\n # item = elements[i]\n # tit = titles[i]\n # classes += [(item.text.replace('\\xa0', ' '), tit.text.replace('\\xa0', ' '))]\n # return classes", "def find_ahref_by_class(tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n href = str(item.find('a'))\n href = href.split('\"')[1]\n result.append(href)\n return result", "def EnrolledClasses(self,html): \n classes = []\n soup = BeautifulSoup(html)\n for element in soup.find_all(\"input\"):\n if element[\"name\"] == \"TITLE\" and element[\"value\"]:\n classes.append(element.get(\"value\"))\n return classes", "def classes(attrs):\n return attrs.get('class', '').split()", "def _find(self, finder, finder_kwargs=None):\n finder_kwargs = finder_kwargs or {}\n\n elements = None\n elem_list = []\n\n try:\n elements = finder(**finder_kwargs)\n if not isinstance(elements, list):\n elements = [elements]\n\n except (\n NoSuchElementException,\n StaleElementReferenceException,\n ):\n # This exception is sometimes thrown if the page changes\n # quickly\n pass\n\n if elements:\n elem_list = [\n self.element_class(element, self, finder_kwargs) for element in elements\n ]\n\n return elem_list", "def web_elements(self):\n if isinstance(self._selector, tuple):\n return self._driver.find_elements(*self._selector)", "def find_class_instances(class_, class_to_find) -> typing.List[str]:\n return [\n name\n for name, _ in inspect.getmembers(\n class_, lambda x: isinstance(x, class_to_find)\n )\n ]", "def get_class_list(self):\n t = []\n for cls in self.classes:\n if not self.is_opaque(cls.classobj):\n t.append(cls)\n elif cls.parents or cls.childs:\n t.append(cls)\n \n return t", "def _getChildrenOfType(self, elementClass):\n method = getattr(self.__class__, \"_getChildrenOfType\" + elementClass.__name__)\n return method(self)", "def get_elements(self, css=None, text=None):\n if css is None and text is None:\n raise ValueError()\n\n # Use ordered sets so we don't muck up the ordering if the caller specifies\n # two or more arguments. 
This is a bit over-convoluted for having only two\n # ways to query (css and text) but the pattern makes it easy to plug in\n # more ways.\n items = None\n def update(new_items):\n nonlocal items\n if items == None:\n items = OrderedSet(new_items)\n else:\n items = items & OrderedSet(new_items)\n\n if text is not None:\n update([e for e in get_elements(self, css=\"*\") if e.text == text])\n if css is not None:\n update(self.find_elements_by_css_selector(css))\n\n return items", "def find_all(m, cls):\n return [node for node in ast.walk(m) if isinstance(node, cls)]", "def find_text_content_by_class(bs, tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n item_text = strip_tags(str(item))\n result.append(\" \".join(item_text.split()))\n return result", "def filter_by_class(objects, cls):\n if cls is not None:\n filtered = []\n classes = cls if isinstance(cls, tuple) else (cls,)\n for o in objects:\n valid = False\n for c in classes:\n try:\n if o.is_valid(c):\n valid = True\n break\n except AttributeError:\n continue\n if valid:\n filtered.append(o)\n return filtered\n else:\n return list(objects)", "def check_and_get_all_elements_by_css_selector(element, selector):\r\n if element is None or not selector:\r\n return [] \r\n try:\r\n return element.find_elements_by_css_selector(selector)\r\n except NoSuchElementException:\r\n return []", "def add_class_to_tag(markup, tag_type, classes):\n soup = BeautifulSoup(markup, \"html.parser\")\n elements = soup.find_all(tag_type)\n\n for el in elements:\n el['class'] = el.get('class', []) + [classes]\n\n return soup.prettify(soup.original_encoding)", "def get_classes(username, password):\n\n def chunk(l, size):\n return [l[i:i+size] for i in xrange(0, len(l), size)]\n\n driver = webdriver.PhantomJS(PHANTOMJS_BIN)\n driver.implicitly_wait(TIMEOUT)\n\n driver.get(TRITONLINK_URL)\n\n # Get redirected to login page\n login_url = driver.current_url\n\n # Send to elements\n e_username = driver.find_element_by_name(USERNAME_NAME)\n e_password = driver.find_element_by_name(PASSWORD_NAME)\n e_username.send_keys(username)\n e_password.send_keys(password)\n e_password.send_keys(Keys.RETURN)\n\n try:\n WebDriverWait(driver, TIMEOUT).until(\n lambda d: d.find_element_by_css_selector(\"#%s, .%s\" %\n (CLASSES_CONTAINER_ID, LOGIN_ERROR_CLASS)\n )\n )\n\n # Check if logged in\n if driver.current_url == login_url:\n raise AuthenticationException\n\n bs_mtl = BeautifulSoup(driver.page_source)\n except TimeoutException:\n raise TritonLinkException(\"Request timed out\")\n finally:\n driver.quit()\n\n # Parse TritonLink\n\n # Get all class elements by weekday\n try:\n bs_classes_container = bs_mtl.find_all(id=CLASSES_CONTAINER_ID)[0]\n except IndexError:\n raise TritonLinkException(\"Classes container not found\")\n\n bs_classes = bs_classes_container.find_all(CLASSES_ELEM)\n by_weekday = zip(*chunk(bs_classes, len(WEEK_DAYS)))\n\n # Process each td\n classes = []\n for class_day, day in zip(WEEK_DAYS, by_weekday):\n for clazz in day:\n try:\n class_info = clazz.find_all(class_=CLASSES_CLASS)[0]\n # If empty, skip\n except IndexError:\n continue\n\n class_time, class_name, class_loc = list(class_info.stripped_strings)\n classes.append({\n 'name': class_name,\n 'day': class_day,\n 'time': class_time,\n 'location': class_loc,\n })\n\n return classes", "def check_class_in_element():\n nonlocal class_not_expected\n result = []\n expected_class_ls = expected_class.split(\" \")\n actual_class = element.get_attribute(\"class\")\n for class_ 
in expected_class_ls:\n for element_class_ in actual_class.split(\" \"):\n if element_class_ == class_:\n result.append(element)\n if len(result) == len(expected_class_ls):\n return element\n if class_not_expected is None:\n class_not_expected = actual_class\n return False", "def find_imgsrc_by_class(tag, class_name):\n result = []\n for item in bs.find_all(tag, {\"class\":class_name}):\n img = str(item.find('img'))\n img = re.findall(r'src=\".*?\"', img)[0].split('\"')[1]\n result.append(img)\n return result", "def get_elements_from_page(pagetree, css):\n\n # Have to convert the CSS selectors to XPATH selectors (gross).\n try:\n expression = GenericTranslator().css_to_xpath(css)\n except SelectorError:\n print('Invalid selector.')\n return\n elements = pagetree.xpath(expression)\n return elements", "def _extract_elements(self, tree, element_type):\n # creates a new attribute, e.g. 'self.nodes' and assigns it an\n # empty list\n setattr(self, element_type, [])\n etree_elements = get_elements(tree, element_type)\n for i, etree_element in enumerate(etree_elements):\n # create an instance of an element class (e.g. TokenNode)\n salt_element = create_class_instance(etree_element, i, self.doc_id)\n # and add it to the corresponding element type list,\n # e.g. 'self.nodes'\n getattr(self, element_type).append(salt_element)\n # In case of a 'nodes' element this is equivalent to:\n # self.nodes.append(TokenNode(etree_element, document_id))", "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def get_classes(self):\n query = read_query('structure exploration/classes')\n response = self._submit_query(query)\n\n return [elem['c']['value'].split('/')[-1] for elem in response]", "def get_tags(html_soup):\n \n tags = html_soup.findAll('a', attrs = {\"class\" : \"tag\"})\n all_tags = []\n for i in tags:\n all_tags.append(i.get_text())\n \n return all_tags", "def get_elements(self, by, criteria):\n # Need reuse criteria\n return self._find_by_locator().find_elements(by, criteria)" ]
[ "0.67307174", "0.6583558", "0.6581643", "0.6082963", "0.6076023", "0.60375786", "0.59123343", "0.5773693", "0.574763", "0.57319593", "0.5679493", "0.56563056", "0.5533203", "0.55314994", "0.5527751", "0.5484552", "0.5425862", "0.54025394", "0.5370151", "0.53265786", "0.53198963", "0.5292792", "0.528116", "0.52597475", "0.5256277", "0.5198077", "0.51869255", "0.5167931", "0.51670825", "0.51451796" ]
0.8594822
0
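A minimal usage sketch for the `get_elements_with_class` row above, kept separate from the dataset fields: it assumes BeautifulSoup 4 is installed, and the HTML fragment is made up for illustration (it is not taken from the dataset).

    # Illustrative only: assumes bs4 (BeautifulSoup 4); the HTML fragment is made up.
    from bs4 import BeautifulSoup

    def get_elements_with_class(soup, elt, cls):
        return soup.findAll(elt, {'class': cls})

    html = '<nav><a class="navbar" href="/">Home</a><a class="navbar" href="/faq">FAQ</a><a class="plain" href="/x">Other</a></nav>'
    soup = BeautifulSoup(html, 'html.parser')
    navbar_links = get_elements_with_class(soup, 'a', 'navbar')
    print([a.text for a in navbar_links])  # ['Home', 'FAQ']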
This function should return a list of soups which each correspond to the html for a single club.
def get_clubs(soup): return get_elements_with_class(soup, 'div', 'box')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_clubs_html():\n url = 'https://ocwp.apps.pennlabs.org'\n return get_html(url)", "def get_club_tags(club):\n\n div = get_elements_with_class(club, 'div', '')[0]\n if len(div) < 1:\n return []\n\n tags = get_elements_with_class(div, 'span', 'tag is-info is-rounded')\n\n return [tag.text for tag in tags]", "def gucci_finder(self, soup):\n gucci = soup.find_all(class_=\"TileItem\")\n good_gucc = []\n n=0\n for item in gucci:\n title = item.find(class_='title').get_text()\n test = self.text_search(title)\n if test == True:\n sizes = []\n for size in item.find(class_='sizes').find_all('a'):\n sizes.append(size.get_text())\n price = item.find(class_='price').get_text()\n image = item.find(class_='img-wrap').img['src']\n href = item.find('a')['href']\n n += 1\n good_gucc.append([title, sizes, price, image, href])\n\n print('We got ' + str(n) + ' guccis')\n good_gucc = pd.DataFrame(good_gucc, columns=['title', 'sizes', 'price', 'image', 'href'])\n good_gucc.sizes = good_gucc.sizes.apply(lambda x: strip_patagonia(x))\n return(good_gucc)", "def get_club_info(url):\n\tbase_url = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\tnum = 0\n\n\tclub_url = base_url + url\n\n\tcategoryArr= []\n\n\tr = rq.get(club_url)\n\tsoup = BeautifulSoup(r.text)\n\tinfoClub = [ '' for i in range(9) ]\n\t#0: clubid\n\tclubid = url.split(\"=\")[-1]\n\tinfoClub[0] = clubid\n\t#1: clubname\n\tinfoClub[1] = soup.find(\"h2\").text\n\t \n\t# info = soup.p.get_text()\n\tinfo = soup.p.get_text().encode('ascii','ignore')\n\t#2: club description\n\tinfoClub[2] = info\n\n\tstuff = soup.ul\n\n\tstuffArray =[]\n\n\tstuffArray.append(stuff.li)\n\n\tcount = 0\n\tfor more in stuff.li.next_siblings:\n\t if (count%2 == 1):\n\t stuffArray.append(more)\n\t count +=1\n\n\t#info New: categories do .a trick\n\n\tcatRaw = BeautifulSoup(str(stuffArray[0]))\n\tcats = catRaw.find_all('a')\n\n\tfor cat in cats:\n\t catStr = []\n\t tempCat = str(cat.get('href'))\n\t catStr.append(clubid)\n\t catStr.append(tempCat[18:])\n\t categoryArr.append(catStr)\n\n\t#info 3: number of members\n\tmemStr = (str(stuffArray[1]))[49:-10]\n\n\t# print memStr\n\tif memStr == '1-9':\n\t memStr = 0\n\telif memStr == '10-25':\n\t memStr = 1\n\telif memStr == '26-50':\n\t memStr = 2\n\telif memStr == '76-100':\n\t memStr =3\n\telse:\n\t memStr = 4\n\t# print memStr\n\n\tinfoClub[3] = str(memStr)\n\n\t#inf 4: involvement\n\tinvolvementStr = str(stuffArray[2])\n\tinfoClub[4] = involvementStr[43:-10]\n\n\t#info 5: group email\n\temailRaw = BeautifulSoup(str(stuffArray[3]))\n\temail = emailRaw.a.get('href')\n\tinfoClub[5] = str(email)\n\n\t#info 6: group website\n\twebRaw = BeautifulSoup(str(stuffArray[4]))\n\tweb = webRaw.a.get('href')\n\tinfoClub[6] = str(web)\n\n\t#info 7: Mailing address\n\tmailingRaw = BeautifulSoup(str(stuffArray[5]))\n\tmail = mailingRaw.ul\n\n\tmailStr = (str(mail.li))[4:-5] + ','\n\n\tcheck = 0\n\tfor line in mail.li.next_siblings:\n\t check +=1\n\t if (check % 2 == 0):\n\t mailStr += (str(line))[4:-5]+ ','\n\n\tmailStr = mailStr[:-1]\n\tif (num != 204):\n\t mailStr.encode('ascii','ignore')\n\n\t if len(mailStr) > 255:\n\t print 'Error: mailing address too long'\n\n\t infoClub[7] = mailStr\n\telse:\n\t infoClubs[7] = \"hardcode\"\n\n\n\t#info 8: month of election\n\tstring1 = str(stuffArray[6])\n\tinfoClub[8] = string1[58:-10]\n\t\n\tprint \"Got all info of\", infoClub[0], infoClub[1]\n\n\treturn infoClub, categoryArr", "def scrape_teams():\n url_list = []\n name_list = []\n pos_list = []\n csv_path = 
\"../CSV_data/ActivePlayerList.csv\"\n with open(csv_path) as csv_file:\n reader = csv.reader(csv_file,skipinitialspace=True)\n for row in reader:\n pos_list.append(row[0])\n name_list.append(row[1])\n profile_url = (row[2]+\"profile\").replace(\"gamelogs?season=\", \"\")\n url_list.append(profile_url)\n\n for pos,name,url in zip(pos_list,name_list,url_list):\n file_path = \"../CSV_data/Test_Team_Scraping\"\n try:\n soup = BeautifulSoup(ul.urlopen(url).read(), \"html.parser\")\n # assign field names\n player_name = soup.find(\"span\", {\"class\" : \"player-name\"}).string\n print player_name, url\n fieldNames = soup.find(\"tr\", {\"class\" : \"player-table-key\"}).findAll(\"td\")\n numColumns = len(fieldNames)\n # pull the statistics\n #table = soup.findAll(\"table\", {\"class\":\"data-table1\"})\n tables = soup.find_all(\"table\", { \"summary\" : \"Career Stats For %s\" % (name)})\n seasons = tables[0]\n #print seasons\n body = tables.find( \"tbody\" )\n #body1 = body[0]\n print body\n print \"test1\"\n rows = body.findAll(\"tr\")\n print \"Test2\"\n rowsList = []\n for i in range(len(rows)):\n if len(rows[i]) > 2:\n print \"test3\"\n rowsList.append(rows[i])\n print rows[i]\n # remove row[0] which contains field names\n del rowsList[len(rowsList)-1]\n except IOError, e:\n print 'Failed to open url'\n print '-------------------------------------'\n if hasattr(e, 'code'):\n print 'We failed with error code - %s.' % e.code\n elif hasattr(e, 'reason'):\n print \"The error object has the following 'reason' attribute :\"\n print e.reason\n return False\n\n except IndexError:\n print 'No regular season data: Index error'\n print '-------------------------------------'\n #return False\n\n except AttributeError:\n print 'No regular season data: Attribute error'\n print '-------------------------------------'\n #return False", "def get_all_clubs():\n\turl = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\n\tr = rq.get(url)\n\tsoup = BeautifulSoup(r.text)\n\tlinks = soup.find_all('a')\n\n\tlinkArray = []\n\tnameArray = []\n\n\tfor link in links:\n\t\tl = link.get('href')\n\t\tlinkArray.append(l)\n\t\tname = link.get_text()\n\t\tname = name.encode('ascii','ignore')\n\t\tnameArray.append(name)\n\n\treturn nameArray, linkArray", "def getStories(self, source):\n self.numberOfStoriesOnFrontPage = source.count(\"span id=score\")\n # Create the empty stories.\n newsStories = []\n for i in range(0, self.numberOfStoriesOnFrontPage):\n story = HackerNewsStory()\n newsStories.append(story)\n \n soup = BeautifulSoup(source)\n # Gives URLs, Domains and titles.\n story_details = soup.findAll(\"td\", {\"class\" : \"title\"}) \n # Gives score, submitter, comment count and comment URL.\n story_other_details = soup.findAll(\"td\", {\"class\" : \"subtext\"})\n\n # Get story numbers.\n storyNumbers = []\n for i in range(0,len(story_details) - 1, 2):\n story = str(story_details[i]) # otherwise, story_details[i] is a BeautifulSoup-defined object.\n storyNumber = self.getStoryNumber(story)\n storyNumbers.append(storyNumber)\n \n storyURLs = []\n storyDomains = []\n storyTitles = []\n storyScores = []\n storySubmitters = []\n storyCommentCounts = []\n storyCommentURLs = []\n storyIDs = []\n\n for i in range(1, len(story_details), 2): # Every second cell contains a story.\n story = str(story_details[i])\n storyURLs.append(self.getStoryURL(story))\n storyDomains.append(self.getStoryDomain(story))\n storyTitles.append(self.getStoryTitle(story))\n \n for s in story_other_details:\n story = str(s)\n 
storyScores.append(self.getStoryScore(story))\n storySubmitters.append(self.getSubmitter(story))\n storyCommentCounts.append(self.getCommentCount(story))\n storyCommentURLs.append(self.getCommentsURL(story))\n storyIDs.append(self.getHNID(story))\n \n \n # Associate the values with our newsStories. \n for i in range(0, self.numberOfStoriesOnFrontPage):\n newsStories[i].number = storyNumbers[i]\n newsStories[i].URL = storyURLs[i]\n newsStories[i].domain = storyDomains[i]\n newsStories[i].title = storyTitles[i]\n newsStories[i].score = storyScores[i]\n newsStories[i].submitter = storySubmitters[i]\n newsStories[i].commentCount = storyCommentCounts[i]\n newsStories[i].commentsURL = storyCommentURLs[i]\n newsStories[i].id = storyIDs[i]\n \n return newsStories", "def clubs(self):\n catalog = getToolByName(self.context, 'portal_catalog')\n\n return [dict(url=club.getURL(), title=club.Title, sport=club.Sport,\n address=club.Description) for club in\n catalog({'object_provides': IClub.__identifier__,\n 'path': dict(query='/'.join(self.context.getPhysicalPath()),\n depth=1), 'sort_on': 'sortable_title'})]", "def __local_sp(soup):\n news = []\n titles = soup.find('section', class_='col-xs-12 maislidas-interno').find_all('h3', class_='fifth')\n\n for title in titles:\n news.append(dict(title=title.string, link=title.parent['href']))\n return news", "def scrape_crew(self):\n\n page = requests.get(self.url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n results = soup.find(\"div\", id=\"fullcredits_content\")\n directors_and_writers = results.find_all(\n \"table\", class_=\"simpleTable simpleCreditsTable\"\n )\n cast = results.find(\"table\", class_=\"cast_list\")\n\n crew = []\n crew.append(directors_and_writers[0])\n crew.append(directors_and_writers[1])\n crew.append(cast)\n\n return crew", "def mine(self):\n collections = []\n # Getting HTML snapshot with selenium, storing a soup object in .data\n self.scrape()\n # Returns only the parts of the soup that surround each collection\n collection_elements = self.get_collection_elements()\n # Turns each soup element into a CollectionElement object\n collections = self.get_info_from_collections(collection_elements)\n # NOTE THE RETURN VALUE IS MERELY TO PASS TESTING< MUST BE CHANGED\n return self.data", "def scrape_all_world_cup_lineups():\n def scrape_lineups_year(year):\n urls = scrape_world_cup_scoreboard(year)\n lineups = []\n for url in urls:\n lineups.extend(scrape_fifa_lineups(url, 'FIFA World Cup'))\n return lineups\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_lineups_year(year))\n return l", "def scrape_fifa_goals(url, competition):\n\n # Seems the 2006 world cup report is missing some games for sasa ilic.\n goal_replace = {\n \"(SCG) 20',\": \"Sasa ILIC (SCG) 20',\"\n }\n\n\n data = scrape_url(url)\n data = data.split(\"<h2>Advertisement</h2>\")[0]\n soup = BeautifulSoup(data)\n\n goals_div = soup.find(\"div\", text='Goals scored')\n goals = [get_contents(e) for e in goals_div.parent.parent.findAll(\"li\")]\n goals = [goal_replace.get(e, e) for e in goals]\n\n goal_re = re.compile(\"^(?P<name>.*?) 
\\((?P<team>[A-Z]+)\\) (?P<minute>\\d+)'?\")\n\n game_data = scrape_fifa_game(url, competition)\n\n\n\n l = []\n\n for s in goals:\n try:\n name, team, minute = goal_re.search(s.strip()).groups()\n except:\n #import pdb; pdb.set_trace()\n print(s)\n continue\n \n team = team.strip()\n team = team_abbrevs.get(team, team)\n\n l.append({\n 'team': team,\n 'competition': competition,\n 'season': game_data['season'],\n 'date': game_data['date'],\n 'goal': name.strip().title(),\n\n\n 'minute': int(minute),\n 'assists': [],\n 'source': url\n })\n\n return l", "def scrape_all_world_cup_games():\n\n def scrape_scores_year(year):\n urls = scrape_world_cup_scoreboard(year)\n scores = [scrape_fifa_game(url, 'FIFA World Cup') for url in urls]\n return scores\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_scores_year(year))\n return l", "def get_clubs_and_transfers(league_name, league_id, season_id, window):\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'}\r\n base = \"https://www.transfermarkt.it\"\r\n url = base + \"/{league_name}/transfers/wettbewerb/{league_id}/plus/?saison_id={season_id}&s_w={window}\".format(\r\n league_name=league_name, league_id=league_id, season_id=season_id, window=window)\r\n try:\r\n print(\"Connecting...\")\r\n response = requests.get(url, headers=headers)\r\n print(\"Connection successful, status code {}\".format(response.status_code))\r\n except requests.exceptions.RequestException as e:\r\n print(e)\r\n exit()\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n\r\n clubs = [tag.text for tag in soup.find_all('div', {'class': 'table-header'})][1:]\r\n\r\n tables = [tag.findChild() for tag in soup.find_all('div', {'class': 'responsive-table'})]\r\n table_in_list = tables[::2]\r\n table_out_list = tables[1::2]\r\n\r\n transfer_in_list = []\r\n transfer_out_list = []\r\n column_headers = {'season': season_id, 'window': window, 'league': league_name}\r\n for table_in, table_out in zip(table_in_list, table_out_list):\r\n transfer_in_list.append(get_transfer_info(base, table_in, movement='In', **column_headers))\r\n transfer_out_list.append(get_transfer_info(base, table_out, movement='Out', **column_headers))\r\n\r\n return clubs, transfer_in_list, transfer_out_list", "def get_player_data(player, battleTag, responce):\r\n # Convert responce to a \"soup\" object by passing it to the soup constructor, and specify lxml as encoder \r\n soup = BeautifulSoup(responce.text, 'lxml')\r\n # List to store Hero Names and Quick Scores \r\n heroes = []\r\n # Loop Through each HTML tag under '<div>' : class: 'name' and look for name contents\r\n # In children, decode and output contents \r\n for parent in soup.find_all('div', {'class': 'name' }): # Specify the parent classes name, type(bs4.element.Tag)\r\n for child in parent.findChildren('a', recursive = False): # Access all of its children, store inside child var type(bs4.element.Tag) \r\n heroes.append(child.decode_contents()) # Get the contents of the child, add to the heroes list type(str)\r\n \r\n quick_scores = [] # To Store the quickscores \r\n # Loop Through each HTML tag under 'div' : class: group special and look for name \r\n #contents In children, decode and output contents, \r\n for parent in soup.find_all('div', {'class': 'group special' }):\r\n children = parent.findChildren('div', recursive = False)\r\n if not 'padded' in children[1].get('class'):\r\n 
quick_scores.append(children[1].findChildren('div', {'class': 'value' }, recursive = False)[0].decode_contents())\r\n \r\n player_image_link =\"\" \r\n\r\n # Get the profile Icon of the player\r\n for link in soup.find_all('div', {'class': 'image-with-corner' }):\r\n images = link.find_all('img')\r\n for img in images:\r\n if \"image-player\" in img['class']: \r\n player_image_link = img['src']\r\n\r\n # Get the number of wins from each hero and overall number of wins by the player\r\n # This time using regex, because why not :>\r\n temp = re.findall(\"<span class=\\\"color-stat-win\\\">[0-9]+</span>\", responce.text)\r\n i = 0\r\n hero_wins = []\r\n for elt in temp: \r\n if i < len(quick_scores)+1:\r\n val = re.sub(\"[^0-9]\", \"\", elt)\r\n hero_wins.append(val)\r\n i = i+1\r\n \r\n player.total_wins = hero_wins[0] # First item is Overall wins by player so far\r\n hero_wins.pop(0) \r\n player.hero_wins = hero_wins # other elements are wins from heroes\r\n \r\n # Convert scores to numeric format i.e 11,534 to 11534\r\n numeric_scores = []\r\n for x in quick_scores:\r\n numeric_scores.append(int(x.replace(',', '')))\r\n \r\n player.battle_tag = battleTag\r\n player.heroes = heroes\r\n player.quick_scores = numeric_scores\r\n player.player_logo = player_image_link", "def get_smmry_data(soup, game_dict):\n\n # Get date and time data.\n try:\n date_soup = soup.find(\"div\", {\"class\": \"spielbericht_tipp_status\"})\n league = date_soup.div.span.text\n date_string = date_soup.div.text\n date = re.search(r'\\d{2}.\\d{2}.\\d{2}', date_string).group(0)\n time = re.search(r'\\d{2}:\\d{2}', date_string).group(0)\n matchday = re.search(r'[|]\\d+', date_string).group(0)[1:]\n\n game_dict[\"league\"] = league\n game_dict[\"fb_date\"] = date\n game_dict[\"fb_time\"] = time\n game_dict[\"matchday\"] = matchday\n except AttributeError:\n pass\n\n # Get game result.\n try:\n result = soup.find(\"div\", {\"class\": \"stand\"}).text\n game_dict[\"result\"] = result\n except AttributeError:\n pass\n\n # Try to get the referee name.\n try:\n referee = soup.find(\"span\", {\"class\": \"schiri_link\"}).text\n game_dict[\"referee\"] = referee\n except AttributeError:\n pass\n\n # Get team, club name and repective url by team.\n try:\n smmry_soup = soup.find(\n \"div\", {\"class\": \"spielbericht_ergebnis_wrapper\"})\n club_title = smmry_soup.find_all(\"img\")\n team_title = smmry_soup.findAll(\"div\", {\"class\": \"teaminfo\"})\n\n # Loop through teams.\n for j, team in enumerate([\"home_\", \"away_\"]):\n game_dict[team + \"team\"] = team_title[j].a[\"title\"]\n game_dict[team + \"team_url\"] = team_title[j].a[\"href\"]\n game_dict[team + \"club\"] = club_title[j][\"title\"]\n except (AttributeError, TypeError):\n pass\n\n return game_dict", "def scraping_actual_team_players(team_abbreviation):\n starting_point = domain + \"/teams/\"\n teamurl = starting_point + team_abbreviation + \"/\"\n team_id = \"div_\" + team_abbreviation\n html = urlopen(teamurl)\n bs = BeautifulSoup(html, 'html.parser')\n actual_team_url = domain + str(bs.find(\"div\", {'id': team_id}).find(\"a\").get(\"href\"))\n html = urlopen(actual_team_url)\n bs = BeautifulSoup(html, 'html.parser')\n players = bs.find(\"table\", {'id':'roster'}).findAll(\"td\", {\"data-stat\":\"player\"})\n players_url = [player.find(\"a\").get(\"href\") for player in players]\n team_players_list = []\n for player_url in players_url:\n time.sleep(3)\n url = domain + player_url\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n print(player_url)\n 
try:\n tabla = pd.read_html(str(bs.find(\"div\", {'id':'all_per_poss'})).replace(\"<!--\", \"\"))[0] \n tabla[\"Player\"] = bs.find(\"h1\", {\"itemprop\" : \"name\"}).text.strip()\n indice = tabla[tabla[\"Season\"]==\"Career\"].index[0]\n tabla = tabla[0:indice]\n tabla = tabla.drop(axis= 1,columns = \"Unnamed: 29\")\n #no me encuentra tablas para uno del college que es el darlina01\n print(player_url)\n team_players_list.append(tabla)\n except:\n pass\n return pd.concat(team_players_list)", "def scrape_current_players(positions):\n for i in range(len(positions)):\n for page in range(6):\n position = positions[i]\n url = \"http://www.nfl.com/players/search?category=position&playerType=current&conference=ALL&d-447263-p=%s&filter=%s&conferenceAbbr=null\" % (page+1, position)\n try:\n soup = BeautifulSoup(ul.urlopen(url).read(), \"html.parser\")\n links = soup.findAll('a', href=re.compile('^/player/'))\n for j in range(len(links)):\n nameFirstLast = str(links[j]).split('\"')[2].lstrip('>').rstrip('</a>').split(',')[1].lstrip() + \" \" + str(links[j]).split('\"')[2].lstrip('>').rstrip('</a>').split(',')[0]\n link = \"http://www.nfl.com\" + str(links[j]).split('\"')[1].rstrip('profile') + \"gamelogs?season=\"\n outputLine = abbr[position], ',', nameFirstLast, ',', link, '\\n'\n with open(\"../CSV_data/ActivePlayerList.csv\", \"a\") as text_file:\n text_file.writelines(outputLine)\n text_file.close()\n except IOError, e:\n print 'Failed to open url'\n print '-------------------------------------'\n if hasattr(e, 'code'):\n print 'We failed with error code - %s.' % e.code\n elif hasattr(e, 'reason'):\n print \"The error object has the following 'reason' attribute :\"\n print e.reason\n return False", "def get_scrapps(self): \n scrapps = []\n self.validate_url()\n soup = self.get_content()\n links = soup.find_all(\"a\")\n table = soup.find_all('div',attrs={\"class\" : \"ph-person-home person-section\"})\n scrapp = Scrapp()\n for tag in table:\n try:\n text = tag.text.replace(\"\\n\",\" \").replace(\"\\r\",\" \").replace(\"\\xa0\",\" \")\n scrapp.add_meta('text',text)\n except KeyError:\n continue\n for link in links:\n try:\n if 'orcid' in link.attrs['href']:\n scrapp.add_meta(\"orcid_link\", link.attrs['href'])\n if \"researcherid\" in link.attrs['href']:\n scrapp.add_meta(\"researchid_link\", link.attrs['href'])\n if \"scholar.google\" in link.attrs['href']:\n scrapp.add_meta(\"googlescholar_link\", link.attrs['href'])\n except KeyError:\n # not all 'a' tags have the links we want\n continue\n scrapps.append(scrapp)\n return scrapps", "def clubs(self):\n return sorted(tuple([v for v in self if v.suit == 'clubs']), reverse=True)", "def _process_html(self) -> None:\n opinion_json = self.request[\"response\"].json()\n for case in opinion_json:\n url = self._get_url(case[\"docketNumber\"], case[\"docketEntryId\"])\n status = (\n \"Published\"\n if case[\"documentType\"] == \"T.C. 
Opinion\"\n else \"Unpublished\"\n )\n self.cases.append(\n {\n \"judge\": case[\"judge\"],\n \"date\": case[\"filingDate\"][:10],\n \"docket\": case[\"docketNumber\"],\n \"url\": url,\n \"name\": titlecase(case[\"caseCaption\"]),\n \"status\": status,\n }\n )", "def scrape_world_cup_scoreboard(year):\n # Replace this with the results logic somehow...\n\n d = world_cup_mapping[year]\n prefix = 'http://www.fifa.com'\n if type(d) == int:\n root_url = '/worldcup/archive/edition=%s/' % d\n else:\n root_url = '/worldcup/archive/%s/' % d\n data = scrape_url(prefix + root_url + \"results/index.html\")\n\n # Find urls in the page.\n match_re = re.compile(root_url + \"results/matches/match=\\d+/report.html\")\n urls = match_re.findall(data)\n return [prefix + e for e in urls]", "def __folha(soup):\n news = []\n\n items = soup.find('li', class_='c-most-popular__item')\n\n for item in items:\n div = item.find('div', class_='c-most-popular__content')\n link = div.a['href']\n title = div.a.string\n news.append(\n dict(title=title, link=replace_original_link_with_outline_call(link)))\n\n return news", "def scrape():\n url_base='https://www.usnews.com/best-colleges/rankings/national-universities'\n unvss=[]\n for page in range(N_PAGE):\n url=url_base+'?_page={}'.format(page+1)\n soup=get_soup(url)\n unvs_tags=soup.find_all('li',id=re.compile(r'^view-.*'),class_='block-normal block-loose-for-large-up')\n for unvs_tag in unvs_tags:\n u=Unvs(unvs_tag)\n print(\"Collect info of {}\".format(u.name))\n unvss.append(u)\n return unvss", "def _scrape(self):", "def get_player_data(soup, game_dict):\n\n # Loop through teams to store information by team.\n for i, team in enumerate([\"home\", \"away\"]):\n try:\n plyrs_soup = soup.findAll(\n \"div\", {\"class\": \"aufstellung_ausgabe_block {}side\".format(team)})[0]\n plyr_data = plyrs_soup.findAll(\"a\", {\"class\": \"spieler_linkurl\"})\n\n # Loop through players by team.\n for j, plyr in enumerate(plyr_data):\n try:\n game_dict[\"{}_plyr_{}\".format(team, j)] = plyr.text\n game_dict[\"{}_plyr_url_{}\".format(team, j)] = plyr[\"href\"]\n except AttributeError:\n pass\n except (AttributeError, IndexError):\n pass\n\n return game_dict", "def create_full_batter_html(url):\n # raw_batter_list = scrape_razzball_batters(url)\n raw_batter_list = fantasy_pro_players(url)\n return create_full_batter(raw_batter_list)", "def div_html_list(self):\n return self.q(css='div.test').html", "def scrape_all():\n\n # Scrape team information by season\n for team in scrape_utils.team_names():\n team_season_stats(team)\n # Each season\n print(team)\n for year in range(2019, 2020):\n # Game Logs\n season_game_logs(team, year)\n\n # Starting Lineups\n #player_scraper.get_starting_lineups(team, year)\n\n # Init mongo to get game IDS for box score scraping\n m = mongo.Mongo()\n\n # Game Information (Box Score and Play by Play)\n for year in range(2015, 2020):\n player_scraper.get_starting_lineups(year)\n for game in m.find('game_log', {'season': year}, {'_id': 1}):\n #team_scraper.play_by_play(game['_id'])\n player_scraper.player_box_score(game['_id'])\n\n print(game['_id'])\n\n\n\n # Get player information\n for player in scrape_utils.get_active_players():\n print(player)\n player_scraper.player_per_game(player)\n\n # Get betting lines (By Year) need from 2014\n for year in range(2015, 2020):\n team_scraper.betting_lines(2019)" ]
[ "0.70870376", "0.6348181", "0.6346335", "0.61832196", "0.59886855", "0.59778345", "0.5971089", "0.59584236", "0.5805674", "0.5787261", "0.5758521", "0.5743293", "0.57430327", "0.5740194", "0.5737127", "0.5732994", "0.5730185", "0.57068115", "0.56748796", "0.5651923", "0.5640834", "0.56069595", "0.55830365", "0.5576259", "0.55749536", "0.5569199", "0.5564144", "0.556129", "0.5553104", "0.5551813" ]
0.69576275
1
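A similar illustrative sketch for the `get_clubs` row above: the two-box page is invented, and each returned element is itself a queryable Tag, matching the "box" class used by the positive document (assumes BeautifulSoup 4).

    # Illustrative only: assumes bs4; the page snippet below is made up.
    from bs4 import BeautifulSoup

    def get_elements_with_class(soup, elt, cls):
        return soup.findAll(elt, {'class': cls})

    def get_clubs(soup):
        return get_elements_with_class(soup, 'div', 'box')

    page = ('<div class="box"><strong class="club-name">Chess Club</strong></div>'
            '<div class="box"><strong class="club-name">Debate Society</strong></div>')
    clubs = get_clubs(BeautifulSoup(page, 'html.parser'))
    print(len(clubs))                    # 2
    print(clubs[0].find('strong').text)  # Chess Club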
Returns the string of the name of a club, when given a soup containing the data for a single club. We've implemented this method for you to demonstrate how to use the functions provided.
def get_club_name(club): elts = get_elements_with_class(club, 'strong', 'club-name') if len(elts) < 1: return '' return elts[0].text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def club_id(self, club_name):\r\n # UTF-8 comparison\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find('table', cellpadding=2).find_all('tr'):\r\n # Get teamid from the bets\r\n team1 = i.find('a')['title']\r\n team2 = i.find_all('a')[1]['title']\r\n if club_name == team1:\r\n return i.find('a')['href'].split('cid=')[1]\r\n elif club_name == team2:\r\n return i.find_all('a')[1]['href'].split('cid=')[1]\r\n return None", "def nameGet(soup):\n name = soup.find('span', id='title', class_='a-size-small')\n name = name.text\n name = name.strip()\n\n return name", "def get_club_description(club):\n elts = get_elements_with_class(club, 'em', '')\n if len(elts) < 1:\n return ''\n return elts[0].text\n\n return ''", "def get_club_info(url):\n\tbase_url = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\tnum = 0\n\n\tclub_url = base_url + url\n\n\tcategoryArr= []\n\n\tr = rq.get(club_url)\n\tsoup = BeautifulSoup(r.text)\n\tinfoClub = [ '' for i in range(9) ]\n\t#0: clubid\n\tclubid = url.split(\"=\")[-1]\n\tinfoClub[0] = clubid\n\t#1: clubname\n\tinfoClub[1] = soup.find(\"h2\").text\n\t \n\t# info = soup.p.get_text()\n\tinfo = soup.p.get_text().encode('ascii','ignore')\n\t#2: club description\n\tinfoClub[2] = info\n\n\tstuff = soup.ul\n\n\tstuffArray =[]\n\n\tstuffArray.append(stuff.li)\n\n\tcount = 0\n\tfor more in stuff.li.next_siblings:\n\t if (count%2 == 1):\n\t stuffArray.append(more)\n\t count +=1\n\n\t#info New: categories do .a trick\n\n\tcatRaw = BeautifulSoup(str(stuffArray[0]))\n\tcats = catRaw.find_all('a')\n\n\tfor cat in cats:\n\t catStr = []\n\t tempCat = str(cat.get('href'))\n\t catStr.append(clubid)\n\t catStr.append(tempCat[18:])\n\t categoryArr.append(catStr)\n\n\t#info 3: number of members\n\tmemStr = (str(stuffArray[1]))[49:-10]\n\n\t# print memStr\n\tif memStr == '1-9':\n\t memStr = 0\n\telif memStr == '10-25':\n\t memStr = 1\n\telif memStr == '26-50':\n\t memStr = 2\n\telif memStr == '76-100':\n\t memStr =3\n\telse:\n\t memStr = 4\n\t# print memStr\n\n\tinfoClub[3] = str(memStr)\n\n\t#inf 4: involvement\n\tinvolvementStr = str(stuffArray[2])\n\tinfoClub[4] = involvementStr[43:-10]\n\n\t#info 5: group email\n\temailRaw = BeautifulSoup(str(stuffArray[3]))\n\temail = emailRaw.a.get('href')\n\tinfoClub[5] = str(email)\n\n\t#info 6: group website\n\twebRaw = BeautifulSoup(str(stuffArray[4]))\n\tweb = webRaw.a.get('href')\n\tinfoClub[6] = str(web)\n\n\t#info 7: Mailing address\n\tmailingRaw = BeautifulSoup(str(stuffArray[5]))\n\tmail = mailingRaw.ul\n\n\tmailStr = (str(mail.li))[4:-5] + ','\n\n\tcheck = 0\n\tfor line in mail.li.next_siblings:\n\t check +=1\n\t if (check % 2 == 0):\n\t mailStr += (str(line))[4:-5]+ ','\n\n\tmailStr = mailStr[:-1]\n\tif (num != 204):\n\t mailStr.encode('ascii','ignore')\n\n\t if len(mailStr) > 255:\n\t print 'Error: mailing address too long'\n\n\t infoClub[7] = mailStr\n\telse:\n\t infoClubs[7] = \"hardcode\"\n\n\n\t#info 8: month of election\n\tstring1 = str(stuffArray[6])\n\tinfoClub[8] = string1[58:-10]\n\t\n\tprint \"Got all info of\", infoClub[0], infoClub[1]\n\n\treturn infoClub, categoryArr", "def display_info(club_data):\n\n for item in club_data:\n if \":\" in item:\n print(f\" {item}\")\n else:\n print(f\"\\nCLUB NAME: {item}\")", "def 
get_champion_name(champion_id, api_key=read_key(), region='na'):\n response = urllib2.urlopen('https://global.api.pvp.net/api/lol/static-data/'+region+'/v1.2/champion/' +\n str(champion_id)+'?api_key=' + api_key)\n champion = json.load(response)\n return champion['name']", "def nameGetOther(soup):\n name = soup.find('h1', id='title', class_='a-size-medium')\n name = name.text\n name = name.strip()\n\n return name", "def get_clubs(soup):\n\n return get_elements_with_class(soup, 'div', 'box')", "def get_clubs_html():\n url = 'https://ocwp.apps.pennlabs.org'\n return get_html(url)", "def __getCompanyName(parsed: BeautifulSoup) -> str:\n\n # Company name container\n name_container = parsed.find('span', class_='companyName')\n\n # Extracting raw text elements\n name_raw_text = [s for s in name_container.children if isinstance(s, str)]\n\n # Getting name (first raw text instance)\n return name_raw_text[0].strip()", "def get_name() -> str:", "def get_club_info(headers):\n\n # For storing the club data\n club_data = []\n\n try:\n for start, info in enumerate(headers):\n each = info.getText()\n if each == \"Club career\":\n start += 1\n break\n\n while headers[start].getText() != \"International career\":\n club_data.append(headers[start].getText())\n start += 1\n\n return club_data\n\n # In case the name given is not a Football player\n except (TypeError, IndexError):\n print(\"INVALID PLAYER NAME!!\")\n return 0", "def get_name():\n return \"Boss\"", "def get_course_name(self,soup):\n\t\tcourse_title = []\n\t\tfor title in soup.find_all(\"div\", class_=\"views-field views-field-title\"):\n\t\t\tcourse_title.append(''.join(title.findAll(text=True)))\n\t\t\tcourse_title.append(' \\n')\t\n\t\t\n\t\tself.new_list.append(course_title)\n\t\treturn course_title", "def get_club_tags(club):\n\n div = get_elements_with_class(club, 'div', '')[0]\n if len(div) < 1:\n return []\n\n tags = get_elements_with_class(div, 'span', 'tag is-info is-rounded')\n\n return [tag.text for tag in tags]", "def club_info(self, cid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/clubInfo.phtml?cid=' + cid, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n plist = list()\r\n for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n plist.append('%s\\t%s\\t%s\\t%s\\t%s' % (\r\n i.find_all('td')[0].text, i.find_all('td')[1].text, i.find_all('td')[2].text, i.find_all('td')[3].text,\r\n i.find_all('td')[4].text))\r\n return soup.title.text, plist", "def get_name(self):\n return self.soup.find('div', id = 'zh-topic-title').h1\\\n .get_text(strip = True).encode(CODE)", "def parse_name_movie(soup, pageurl):\n\t# find the summary class header\n\tname_tag = soup.findAll('th', {'class': 'summary'})\n\t# if this header doesn't exist, cannot retrieve name\n\tif len(name_tag) == 0:\n\t\tlogging.warn('' + pageurl + 'does not have a valid name field, parsing terminated')\n\t\treturn None\n\t# return name as a string\n\treturn name_tag[0].get_text()", "def get_name():", "def getName(self, html):\n soup = bs(html, \"lxml\")\n results = soup.findAll(\"h1\", {\"data-reactid\" : \"7\"})\n if len(results) != 1:\n return False, None\n name = results[0].text.split(' (')[0]\n return True, name", "def get_all_clubs():\n\turl = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\n\tr = rq.get(url)\n\tsoup = 
BeautifulSoup(r.text)\n\tlinks = soup.find_all('a')\n\n\tlinkArray = []\n\tnameArray = []\n\n\tfor link in links:\n\t\tl = link.get('href')\n\t\tlinkArray.append(l)\n\t\tname = link.get_text()\n\t\tname = name.encode('ascii','ignore')\n\t\tnameArray.append(name)\n\n\treturn nameArray, linkArray", "def get_CityName():\n return str(root.find('provincia').text) # root.find('province') returns the direct child 'province' of root. ...\n # ... An equivalent way to get the same result is ( root[3].text ), where ...\n # ... root[2] represents 'province' tag and it's the 4th direct child of root.", "def parse_name_actor(soup, pageurl):\n\t# find fn (fullname) class\n\tname_span = soup.findAll('span', {'class': 'fn'});\n\t# if class does not exist, cannot get name\n\tif len(name_span) == 0:\n\t\tlogging.warning('' + pageurl + ' does not contain a name for the actor and will not be parsed')\n\t\treturn None\n\tname = name_span[0].get_text()\n\t# handle edge cases where HTML is butchered - cannot convert to JSON if this goes through\n\tif '<' in name:\n\t\tlogging.warning('' + pageurl + ' does not contain a name for the actor and will not be parsed')\n\t\treturn None\n\treturn name", "def get_recipe_title(soup_recipe):\n return soup_recipe.find(\"h1\", {\"itemprop\": \"name\"}).get_text()", "def get_recipe_title(soup_recipe):\n return soup_recipe.find(\"h1\", {\"itemprop\": \"name\"}).get_text().strip()", "def get_apartment_name(self, soup, apartment_dict):\n\n info_class = soup.find_all('div', {'class': 'info'})\n if info_class and len(info_class) > 0:\n info_class = info_class[0]\n else:\n logging.warning(\"Failed to parse apartment name\")\n return None\n\n title = info_class.find('h1').text.strip()\n apartment_dict['name'] = title", "def cikToName(CIK):\n\n # Form URL to search for CIK\n url = \"http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK=\" \\\n + str(CIK)\n\n # Get the results\n resp = requests.get(url)\n soup = BeautifulSoup(resp.text, \"html.parser\")\n\n # There should be a tag with the name in it\n tag = soup.find_all('span', attrs={\"class\": \"companyName\"})[0]\n name = tag.contents[0]\n\n return name", "def get_league_name(wd):\n try:\n league = wd.find_element_by_xpath('/html/body/div[4]/div[5]/div/div/div[1]/section/header/h1/a').text\n # There used to be cleaning to letters but there can be years of league (and endings like league 1)\n return league\n except:\n return \"N/A League\"", "def printname(bruce):", "def get_name_company(html_job_container):\n lines = html_job_container.splitlines()\n if len(lines) > 0:\n return lines[0]\n return None" ]
[ "0.64601535", "0.64196163", "0.64184594", "0.6161314", "0.60368645", "0.5965522", "0.5939414", "0.593849", "0.5840082", "0.57630974", "0.5689154", "0.559938", "0.5528548", "0.5501211", "0.54695916", "0.54668534", "0.5447831", "0.54451835", "0.54218644", "0.5407108", "0.53945386", "0.5343761", "0.5328504", "0.532509", "0.5297434", "0.5282858", "0.5280978", "0.52595794", "0.52579063", "0.52447575" ]
0.7737461
0
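A matching sketch for the `get_club_name` row above, applied to a single club Tag such as the ones returned by `get_clubs` (illustrative HTML, assumes BeautifulSoup 4).

    # Illustrative only: assumes bs4; the club markup below is made up.
    from bs4 import BeautifulSoup

    def get_elements_with_class(soup, elt, cls):
        return soup.findAll(elt, {'class': cls})

    def get_club_name(club):
        elts = get_elements_with_class(club, 'strong', 'club-name')
        if len(elts) < 1:
            return ''
        return elts[0].text

    club = BeautifulSoup('<div class="box"><strong class="club-name">Chess Club</strong></div>', 'html.parser')
    print(get_club_name(club))  # Chess Club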
Extract club description from a soup containing the data for a single club.
def get_club_description(club): elts = get_elements_with_class(club, 'em', '') if len(elts) < 1: return '' return elts[0].text return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape_description(main_soup):\n rich_text = main_soup.find('div', {'class': 'rich-text'})\n description = rich_text.find('p').text\n\n return description", "def get_description(soup_recipe):\n description = soup_recipe.find(\"div\", {\"itemprop\": \"description\"})\n if not description:\n return None\n return description.get_text().strip()", "def _get_description(li):\n\n sdiv = li.find(\"div\", attrs={\"class\": \"result__body\"})\n if sdiv:\n stspan = sdiv.find(\"a\", attrs={\"class\": \"result__snippet\"})\n if stspan is not None:\n # return stspan.text.encode(\"utf-8\").strip()\n return stspan.text.strip()\n else:\n return None", "def get_description(soup):\n\n standard_head = ['Description', 'Getting There', 'Protection', 'Location']\n\n # grab all h3 orange header sections on the page\n detail = {}\n other_text = []\n for h3 in soup.find_all('h3', { 'class': \"dkorange\" }):\n \n # text is the element after the h3\n body = h3.next_sibling\n \n if isinstance(body, NavigableString):\n # ignore sections from here on like 'Climbing Season' and such\n break\n else:\n # these are the valuable text sections\n body = body.get_text()\n body = body.encode('utf-8', errors = 'ignore')\n head = h3.get_text().encode('utf-8', errors = 'ignore')\n head = head.strip('\\xc2\\xa0')\n\n if head in standard_head:\n head = head.replace(' ','_').lower()\n detail[head] = body\n else:\n other_text.append(body)\n\n # combine text into a full description\n if len(other_text) > 0:\n if 'description' in detail:\n # combine description with other text -- questionable but appropriate\n detail['description'] = detail['description'] + '\\n'.join(other_text)\n else:\n detail['description'] = '\\n'.join(other_text)\n\n # blank if there is no text at all\n if 'description' not in detail:\n detail['description'] = ''\n\n return detail", "def description_mega(self, html): # pylint: disable=too-many-statements,too-many-branches\n description_list = []\n with suppress(Exception):\n '''\n Tested on\n * https://economictimes.indiatimes.com/news/economy/policy/government-mops-up-rs-8660-cr-from-disinvestment-in-02/articleshow/33105933.cms\n <meta content=\"The total disinvestment realisation of the government during 2002 topped Rs 8,660 crore. The cabinet committee on disinvestment (CCD) had cleared transactions worth Rs 6,168 crore during the year.\" name=\"description\">\n * https://timesofindia.indiatimes.com/city/bengaluru/ISROs-second-launch-pad-to-be-sent-by-March-end/articleshow/3801270.cms\n <meta name=\"description\" content=\"BANGALORE: The second launch pad for the Indian Space Research Organisation will be dispatched to Sriharikota by the end of March. The Mobile Launch P\">\n '''\n meta_name_description = html.find('meta', {'name': 'description'})\n description_list.append(\n self.text_cleaning(meta_name_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.deccanherald.com/content/1368/agriculture-department-urged-regulate-fertilisers.html\n <meta property=\"og:description\" content=\"Farmers will be happy only if they get good rains and sufficient fertilisers. 
They were is deep trouble due to the improper supply of fertilisers.\">\n * https://sports.ndtv.com/cricket/we-cant-influence-indian-high-commission-for-visas-pcb-1594242\n <meta property=\"og:description\" content=\"Pakistan Cricket Board made it clear that it had done everything under its power to get the visas for its cricketers to play in the IPL next year.\">\n '''\n meta_property_og_description = html.find(\n 'meta', {'property': 'og:description'})\n description_list.append(\n self.text_cleaning(meta_property_og_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.independent.co.uk/news/world/americas/elijah-mcclain-death-colorado-police-black-lives-matter-george-floyd-police-a9584366.html\n <meta name=\"twitter:description\" content=\"'Demand these officers are taken off duty, and that a more in-depth investigation is held', page reads\">\n * https://nypost.com/2010/09/27/brooklyn-tea-party-rallies-against-ground-zero-mosque-multiculturalism/\n <meta name=\"twitter:description\" content=\"About 125 people gathered at a recent Bay Ridge rally of the Brooklyn Tea Party to protest a variety of hot subjects — especially the planned Ground Zero mosque, according to a Brooklyn Ink\">\n '''\n meta_name_twitter_description = html.find(\n 'meta', {'name': 'twitter:description'})\n description_list.append(\n self.text_cleaning(meta_name_twitter_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.standard.co.uk/news/uk/boris-johnson-u-turn-free-school-meals-marcus-rashford-a4470506.html\n <meta property=\"twitter:description\" content=\"'THIS is England in 2020'\">\n * https://www.express.co.uk/news/politics/1369685/brexit-news-uk-eu-trade-deal-france-fishing-emmanuel-macron-no-deal-latest\n <meta property=\"twitter:description\" content=\"FRENCH fishermen have lashed out at Emmanuel Macron, warning he is playing a &quot;dangerous game&quot; and has &quot;overstepped the mark&quot; by threatening to veto a post-Brexit trade deal with the UK.\">\n '''\n meta_property_twitter_desc = html.find(\n 'meta', {'property': 'twitter:description'})\n description_list.append(\n self.text_cleaning(meta_property_twitter_desc['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.indiatoday.in/india/story/pm-modi-launch-covid-vaccination-drive-jan-16-cowin-app-coronavirus-covaxin-covishield-1758628-2021-01-13\n <meta itemprop=\"description\" content=\"Prime Minister Narendra Modi will kickstart the Covid-19 vaccination programme in India with a virtual launch on January 16, sources have told India Today.\">\n * https://indianexpress.com/article/world/print/four-killed-as-armed-militants-storm-5-star-hotel-in-pakistans-gwadar-port-city-police-5723193/\n <meta itemprop=\"description\" content=\"A shootout between the militants and the security forces broke out at the hotel as the anti-terrorism force, the Army and the Frontier Corps were called in, Gwadar Station House Officer (SHO) Aslam Bangulzai said.\">\n '''\n meta_itemprop_description = html.find('meta',\n {'itemprop': 'description'})\n description_list.append(\n self.text_cleaning(meta_itemprop_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.cnbc.com/2020/12/25/the-plant-based-meat-industry-is-on-the-rise-but-challenges-remain.html\n <meta itemprop=\"description\" name=\"description\" content=\"Demand for meat alternatives has grown and will continue to rise, but the industry still has hurdles to overcome in different 
parts of the world, analysts said.\">\n * https://www.oneindia.com/india/congress-leader-dk-shivakumar-to-appear-before-cbi-in-disproportionate-assets-case-today-3180984.html\n <meta name=\"description\" itemprop=\"description\" content=\"On October 5, the CBI conducted raids at 14 locations, including in Karnataka, Delhi and Mumbai at the premises belonging to Shivakumar and others, and recovered Rs 57 lakh cash and several documents, including property documents, bank related information, computer hard disk. \">\n '''\n meta_name_itemprop_description = html.find(\n 'meta', {\n 'name': 'description',\n 'itemprop': 'description'\n })\n description_list.append(\n self.text_cleaning(meta_name_itemprop_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://scroll.in/field/979390/they-can-beat-australia-in-their-own-den-shastri-backs-india-s-fabulous-five-quicks-to-shine\n <meta name=\"dcterms.description\" content=\"The India coach said his team’s pace unit was the best in the world, despite being likely to be without the injured Ishant Sharma.\">\n * https://scroll.in/field/979393/champions-league-last-gasp-wins-take-juventus-chelsea-and-sevilla-into-last-16-barcelona-cruise\n <meta name=\"dcterms.description\" content=\"They are the first teams to make it out of the group stage, doing so with two games to spare.\">\n '''\n meta_name_dcterms_description = html.find(\n 'meta', {'name': 'dcterms.description'})\n description_list.append(\n self.text_cleaning(meta_name_dcterms_description['content']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.express.co.uk/news/weather/1370081/BBC-Weather-Europe-snow-forecast-cold-December-update-video-vn\n <div class=\"text-description\"><p><span>BBC Weather meteorologist Stav Danaos forecast unsettled weather across the&nbsp;</span><span>Mediterranean for the rest of the week. 
He added a blocking area of high pressure across Russia was contributing to the unsettling weather.</span></p></div>\n * https://www.express.co.uk/news/politics/1383306/Brexit-live-latest-brexit-deal-Northern-Ireland-customs-boris-johnson-john-redwood\n <div class='text-description'><p>Earlier today, Boris Johnson suggested some fishing businesses in Scotland would receive compensation as he defended...</p></div>\n '''\n div_class_text_description = html.find(\n 'div', {'class': 'text-description'})\n description_list.append(\n self.text_cleaning(div_class_text_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.usatoday.com/story/news/nation/2020/12/07/north-atlantic-right-whale-endangered-species-newborns/6484190002/\n <div...data-ss-d=\"Two North Atlantic right whale newborns have been spotted in the last week at the start of calving season, providing hope for an endangered species.\"...>\n * https://www.usatoday.com/story/sports/mls/2020/12/07/mls-cup-2020-seattle-sounders-advance-play-columbus-crew-title/6487291002/\n <div...data-ss-d=\"The Seattle Sounders scored two late goals to complete a dramatic rally over Minnesota United and advance to MLS Cup to play the Columbus Crew.\"...>\n '''\n div_data_ssd = html.find('div', {'data-ss-d': True})\n description_list.append(\n self.text_cleaning(div_data_ssd['data-ss-d']))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.indiatoday.in/technology/news/story/amazon-great-republic-day-sale-announced-from-january-20-deals-bank-offers-and-more-1758622-2021-01-13\n <div class=\"story-kicker\"><h2>Amazon's Great Republic Day Sale begins January 20 but Prime members will get 24 hours early access on deals.</h2></div>\n * https://www.indiatoday.in/sports/cricket/story/a-win-at-gabba-will-give-india-their-greatest-test-series-victory-ever-says-akhtar-1758619-2021-01-13\n <div class=\"story-kicker\"><h2>Former Pakistan fast bowler Shoaib Akhtar lauded India for the fight they have shown in the series so far and said that they should go on to win the final Test in Brisbane.</h2></div>\n '''\n div_class_story_kicker = html.find('div',\n {'class': 'story-kicker'})\n description_list.append(\n self.text_cleaning(div_class_story_kicker.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.espncricinfo.com/story/vitality-t20-blast-mitchell-claydon-misses-sussex-s-t20-blast-defeat-after-hand-sanitiser-ball-tampering-ban-1234150\n <p class=\"article-summary\">Seamer will miss first two games of 2021 as well after nine-match ban imposed by CDC</p>\n * https://www.espncricinfo.com/series/vitality-blast-2020-1207645/nottinghamshire-vs-leicestershire-1st-quarter-final-1207789/match-report\n <p class=\"article-summary\">Nottinghamshire progress on higher Powerplay score after securing dramatic tie off last ball</p>\n '''\n p_class_article_summary = html.find('p',\n {'class': 'article-summary'})\n description_list.append(\n self.text_cleaning(p_class_article_summary.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.nytimes.com/2020/01/31/learning/is-it-offensive-for-sports-teams-and-their-fans-to-use-native-american-names-imagery-and-gestures.html\n <p id=\"article-summary\" class=\"css-w6ymp8 e1wiw3jv0\">The Kansas City Chiefs will face the San Francisco 49ers for Super Bowl LIV. 
Chiefs fans regularly use a “tomahawk chop” to urge on their beloved team: Is it offensive?</p>\n * https://www.nytimes.com/2020/01/09/world/middleeast/iran-plane-crash-ukraine.html\n <p id=\"article-summary\" class=\"css-w6ymp8 e1wiw3jv0\">Western intelligence showed that Iran was responsible for the plane crash, suggesting that the deaths of those aboard were a consequence of the heightened tensions between Washington and Iran. </p>\n '''\n p_id_article_summary = html.find('p', {'id': 'article-summary'})\n description_list.append(\n self.text_cleaning(p_id_article_summary.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://economictimes.indiatimes.com/industry/services/education/indian-universities-look-abroad-for-success-at-home/articleshow/5957175.cms\n <h2 class=\"summary\">Foreign universities may soon be able to set up in India but some of their Indian counterparts are looking in the other direction — to better equip students for the demands of the global economy.</h2>\n * https://economictimes.indiatimes.com/industry/transportation/railways/conviction-rate-in-theft-cases-in-central-railways-mumbai-division-falls-steeply/articleshow/48554953.cms\n <h2 class=\"summary\">According to official data, the conviction rate in theft cases of railway properties has witnessed a steep fall in Mumbai Division of Central Railway.</h2>\n '''\n h2_class_summary_description = html.find('h2',\n {'class': 'summary'})\n description_list.append(\n self.text_cleaning(h2_class_summary_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://sports.ndtv.com/india-vs-england-2020-21/ind-vs-eng-virat-kohli-reflects-on-battling-depression-during-2014-england-tour-2373999\n <h2 class=\"sp-descp\">India vs England: Virat Kohli opened up about dealing with depression on India's 2014 tour of England where Kohli endured a horror run with the bat.</h2>\n * https://sports.ndtv.com/cricket/we-cant-influence-indian-high-commission-for-visas-pcb-1594242\n <h2 class=\"sp-descp\">Pakistan Cricket Board made it clear that it had done everything under its power to get the visas for its cricketers to play in the IPL next year.</h2>\n '''\n h2_class_sp_descp_description = html.find('h2',\n {'class': 'sp-descp'})\n description_list.append(\n self.text_cleaning(h2_class_sp_descp_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://indianexpress.com/article/news-archive/days-are-not-far-when-kashmiri-pandits-would-return-to-their-homes-with-dignity-jk-bjp-4842449/\n <h2 itemprop=\"description\" class=\"synopsis\">\"Those days are not far when the displaced people will return to their Kashmir with dignity and honour. The BJP will leave no stone unturned in solving the problems of the hapless people who were forced to leave the Valley,\" Jammu and Kashmir BJP unit chief Sat Sharma said. 
</h2>\n * https://indianexpress.com/article/india/web/bjp-mp-karandlaje-challenges-karnataka-cm-siddaramaiah-govt-to-arrest-her-4996043/\n <h2 itemprop=\"description\" class=\"synopsis\">An FIR was filed against BJP MP Shobha Karandlaje on charges of provoking people to cause riots, disturbing communal harmony and spreading rumours.</h2>\n '''\n h2_itemprop_description = html.find('h2',\n {'itemprop': 'description'})\n description_list.append(\n self.text_cleaning(h2_itemprop_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.business-standard.com/article/current-affairs/death-of-galaxy-galactic-collision-spews-gases-equal-to-10-000-suns-a-year-121011300543_1.html\n <h2 class=\"alternativeHeadline\">The merging galaxy formed 4.5 billion years ago is dubbed ID2299 and is ejecting gases equivalent to 10,000 Suns-worth of gas a year</h2>\n * https://www.business-standard.com/article/international/wb-economist-china-will-need-to-learn-to-restructure-emerging-market-debt-121011300034_1.html\n <h2 class=\"alternativeHeadline\">Increasing debt distress in emerging markets means that China will need to start restructuring debts in the same way that Paris Club lenders did in past crises, World Bank Chief Economist said</h2>\n '''\n h2_class_alternative_headline = html.find(\n 'h2', {'class': 'alternativeHeadline'})\n description_list.append(\n self.text_cleaning(h2_class_alternative_headline.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.express.co.uk/news/world/1369648/India-news-mystery-illness-coronavirus-covid-Andhra-Pradesh-eluru-disease-cause-ont\n <h3>OFFICIALS in India are reportedly seeking to manage panic in the Indian state of Andhra Pradesh due to a mysterious illness spreading in the district.</h3>\n * https://www.express.co.uk/news/politics/1383306/Brexit-live-latest-brexit-deal-Northern-Ireland-customs-boris-johnson-john-redwood\n <h3>A HUGE new fishing row has erupted between Scottish fishermen anf the UK Government, with BBC News Political Editor Laura Kuenssberg warning: \"This could get messy.\"</h3>\n '''\n h3_description = html.find('h3')\n description_list.append(self.text_cleaning(h3_description.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.independent.co.uk/arts-entertainment/tv/news/ratched-netflix-trigger-warning-child-abuse-suicide-violence-sarah-paulson-b571405.html\n <h2 class=\"sc-qYhdC bflsCm\"><p>Despite presence of warning over graphic content, fans have called for more</p></h2>\n * https://www.independent.co.uk/arts-entertainment/tv/news/bridgerton-violet-actor-ruth-gemmell-tracy-beaker-b1780757.html\n <h2 class=\"sc-oTcDH eZHAcN\"><p>Gemmell starred in the 2004 CBBC film Tracy Beaker: The Movie of Me</p></h2>\n '''\n header_id_articleheader = html.find('header',\n {'id': 'articleHeader'})\n header_two = header_id_articleheader.find('h2')\n description_list.append(self.text_cleaning(header_two.text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://scroll.in/article/979318/what-is-the-extent-of-caste-segregation-in-indian-villages-today-new-data-gives-us-an-idea\n <h2>‘The extent of intra-village segregation in Karnataka is greater than the local black-white segregation in the American South.’</h2>\n * https://scroll.in/latest/979410/khichdification-ima-demands-withdrawal-of-move-allowing-ayurveda-doctors-to-perform-surgery\n <h2>The medical body said that the move should not be seen in isolation, referring to other government decisions ‘legitimising Mixopathy’.</h2>\n '''\n 
header = html.find('header')\n description_list.append(\n self.text_cleaning(header.find_next('h2').text))\n\n with suppress(Exception):\n '''\n Tested on\n * https://www.euronews.com/2020/12/08/charlie-hebdo-trial-prosecutors-request-30-year-sentence-for-fugitive-widow-of-attacker\n <script type=\"application/ld+json\"... '@graph': [\"description\": \"Prosecutors have asked for sentences ranging from 5 years to life imprisonment for the defendants in the Charlie Hebdo trial, including the fugitive widow of one of the attackers.\"...]...>\n * https://www.euronews.com/2020/12/08/france-s-next-aircraft-carrier-to-be-nuclear-powered-macron-confirms\n <script type=\"application/ld+json\"... '@graph': [\"description\": \"France's current flagship warship is to be retired in 2038. It will be replaced by a bigger, nuclear-powered model, Macron said on Tuesday.\"...]...>\n '''\n first_script = html.find('script', {'type': 'application/ld+json'})\n data = json.loads(first_script.string, strict=False)\n description_list.append(\n self.text_cleaning(data['@graph'][0]['description']))\n\n with suppress(Exception):\n scripts = html.find_all('script', {'type': 'application/ld+json'})\n scripts = [script for script in scripts if script is not None]\n for script in scripts:\n with suppress(Exception):\n '''\n Tested on\n * https://www.espncricinfo.com/story/ipl-2020-jofra-archer-thriving-in-different-type-of-pressure-at-ipl-says-rajasthan-royals-team-mate-jos-buttler-1234126\n <script type='application/ld+json'...\"description\":\"Fifty-over cricket must take a back seat in build-up to T20 World Cup, says senior batsman\"...>\n '''\n data = json.loads(script.string, strict=False)\n if isinstance(data, list):\n data = data[0]\n if data[\"@type\"] == \"NewsArticle\" or data[\n \"@type\"] == \"WebPage\":\n if data[\"description\"]:\n description_list.append(\n self.text_cleaning(data[\"description\"]))\n with suppress(Exception):\n data = json.loads(script.string, strict=False)\n if data[\"@type\"] == \"NewsArticle\":\n if isinstance(data[\"video\"], list):\n description_list.append(\n self.text_cleaning(\n data[\"video\"][0][\"description\"]))\n elif not isinstance(data[\"video\"], list):\n description_list.append(\n self.text_cleaning(\n data[\"video\"][\"description\"]))\n description_list = [\n description for description in description_list\n if description != ''\n ]\n if not description_list:\n return \" \"\n best_description = max(sorted(set(description_list)),\n key=description_list.count)\n return best_description", "def get_club_info(url):\n\tbase_url = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\tnum = 0\n\n\tclub_url = base_url + url\n\n\tcategoryArr= []\n\n\tr = rq.get(club_url)\n\tsoup = BeautifulSoup(r.text)\n\tinfoClub = [ '' for i in range(9) ]\n\t#0: clubid\n\tclubid = url.split(\"=\")[-1]\n\tinfoClub[0] = clubid\n\t#1: clubname\n\tinfoClub[1] = soup.find(\"h2\").text\n\t \n\t# info = soup.p.get_text()\n\tinfo = soup.p.get_text().encode('ascii','ignore')\n\t#2: club description\n\tinfoClub[2] = info\n\n\tstuff = soup.ul\n\n\tstuffArray =[]\n\n\tstuffArray.append(stuff.li)\n\n\tcount = 0\n\tfor more in stuff.li.next_siblings:\n\t if (count%2 == 1):\n\t stuffArray.append(more)\n\t count +=1\n\n\t#info New: categories do .a trick\n\n\tcatRaw = BeautifulSoup(str(stuffArray[0]))\n\tcats = catRaw.find_all('a')\n\n\tfor cat in cats:\n\t catStr = []\n\t tempCat = str(cat.get('href'))\n\t catStr.append(clubid)\n\t catStr.append(tempCat[18:])\n\t categoryArr.append(catStr)\n\n\t#info 3: 
number of members\n\tmemStr = (str(stuffArray[1]))[49:-10]\n\n\t# print memStr\n\tif memStr == '1-9':\n\t memStr = 0\n\telif memStr == '10-25':\n\t memStr = 1\n\telif memStr == '26-50':\n\t memStr = 2\n\telif memStr == '76-100':\n\t memStr =3\n\telse:\n\t memStr = 4\n\t# print memStr\n\n\tinfoClub[3] = str(memStr)\n\n\t#inf 4: involvement\n\tinvolvementStr = str(stuffArray[2])\n\tinfoClub[4] = involvementStr[43:-10]\n\n\t#info 5: group email\n\temailRaw = BeautifulSoup(str(stuffArray[3]))\n\temail = emailRaw.a.get('href')\n\tinfoClub[5] = str(email)\n\n\t#info 6: group website\n\twebRaw = BeautifulSoup(str(stuffArray[4]))\n\tweb = webRaw.a.get('href')\n\tinfoClub[6] = str(web)\n\n\t#info 7: Mailing address\n\tmailingRaw = BeautifulSoup(str(stuffArray[5]))\n\tmail = mailingRaw.ul\n\n\tmailStr = (str(mail.li))[4:-5] + ','\n\n\tcheck = 0\n\tfor line in mail.li.next_siblings:\n\t check +=1\n\t if (check % 2 == 0):\n\t mailStr += (str(line))[4:-5]+ ','\n\n\tmailStr = mailStr[:-1]\n\tif (num != 204):\n\t mailStr.encode('ascii','ignore')\n\n\t if len(mailStr) > 255:\n\t print 'Error: mailing address too long'\n\n\t infoClub[7] = mailStr\n\telse:\n\t infoClubs[7] = \"hardcode\"\n\n\n\t#info 8: month of election\n\tstring1 = str(stuffArray[6])\n\tinfoClub[8] = string1[58:-10]\n\t\n\tprint \"Got all info of\", infoClub[0], infoClub[1]\n\n\treturn infoClub, categoryArr", "def description(soup: str, nb:int):\n desc = []\n for span in soup.findAll('article', attrs={'itemprop': 'review'}):\n dat = str(recovTextBetweenTags(str(span.findAll('time', attrs={\n 'itemprop': 'datePublished'})), ',')).replace(\"['[\", '').replace(\"]']\", '')\n dat = (format_date(dat))\n if (dat) > (datetime.now() - timedelta(nb)):\n top = span.findAll('div', attrs={'class': 'text_content'})\n desc.append(translate(recovTextBetweenTags(str(top), ',')))\n\n return desc", "def get_description(soup_recipe):\n description = soup_recipe.find(\"div\", {\"itemprop\": \"description\"}).find(\"div\",\n {\"class\": \"field-items\"}).find(\"div\")\n if not description:\n return None\n return description.get_text()", "def filter_description(soup):\n\n # TODO: Filter different text sections from description, such as\n # 'NOTE', 'LEARNING HOURS', etc.\n descr_raw = soup.find('span', id=regex_desc)\n\n if not descr_raw:\n return ''\n\n # If <br/> tags exist, there will be additional information other\n # than the description. 
Filter for description only.\n if descr_raw.find_all('br'):\n return descr_raw.find_all('br')[0].previous_sibling\n\n return descr_raw.text.encode('ascii', 'ignore').decode().strip()", "def extract_product_description(soup):\r\n section = soup.find(\"div\", attrs={\"id\": \"content_inner\"})\r\n if section.find(\"p\", attrs=None):\r\n product_description = (section.find(\"p\", attrs=None).text)\r\n return product_description\r\n else:\r\n return None", "def _parse_description(self, response):\n return \" \".join(response.css(\"article.description > *::text\").extract()).strip()", "def get_info_game(soup):\n info = []\n\n content = soup.select(\"div.fftit.s20.b\").pop()\n info.append(content.span.text)\n info.append(re.search(r'\\((.*?)\\)', content.text).group(1))\n\n for dt, dd in zip(soup.findAll(\"dt\"), soup.findAll(\"dd\")):\n if dt.text == \"Desarrollador:\":\n info.append(dd.text)\n elif dt.text == \"Editor:\":\n info.append(dd.text)\n elif dt.text == \"Género:\":\n info.append(dd.text)\n\n info.append(soup.find(\"span\", {\"itemprop\": \"releaseDate\"}).attrs['content'])\n\n info.extend([div.span.text for div in soup.select(\"div.dtc.wi36\")])\n\n return zip([\"name\", \"platform\", \"study\", \"publisher\", \"genre\", \"releaseDate\", \"3DJuegosScore\", \"userScore\"], info)", "def get_gp_text_description(html):\n m = re.search('<div id=\"doc-description-container\"', html)\n desc_section_start = html[m.start():]\n m = re.search('</div>', desc_section_start)\n desc_section = desc_section_start[:m.start()]\n cleaned_desc = filter(lambda x: x in string.printable, desc_section)\n parser = HTMLParser()\n return parser.unescape(nltk.clean_html(cleaned_desc))", "def get_clubs_html():\n url = 'https://ocwp.apps.pennlabs.org'\n return get_html(url)", "def _parse_description(self, response):\n return re.sub(\n r\"\\s+\", \" \", \" \".join(response.css(\".col-sm-12 > p *::text\").extract())\n ).strip()", "def parse_page(html):\n\n soup = BeautifulSoup(html, \"html.parser\")\n review_soups = soup.find_all(\"script\", type=\"application/ld+json\")\n\n description_list = []\n for soup in review_soups:\n text = soup.string\n # decode the json into python dict\n js_dict = json.loads(text)\n\n if \"review\" in js_dict:\n review_list = js_dict[\"review\"]\n\n for i in range(len(review_list)):\n review_dict = review_list[i]\n description_list.append(review_dict[\"description\"])\n\n return description_list", "def biological_science_news():\n\n return general_scraper(['http://mesva.univaq.it/?q=avvisi/cl-clm/52672'])", "def get_product_description(article):\n product_description_text = article.select(\"p\")\n return product_description_text[3].text", "def getAdditionalDetails(self, soup):\n title_details = soup.find('div', id=\"titleDetails\")\n title_details = title_details.findAll('div', class_=\"txt-block\")\n return title_details", "def get_clubs(soup):\n\n return get_elements_with_class(soup, 'div', 'box')", "def get_description(self):\n des = self.soup.find('div', id = 'zh-topic-desc').find('div', class_ = 'zm-editable-content')\n if des:\n return des.get_text(strip = True).encode(CODE)\n return None", "def club_info(self, cid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/clubInfo.phtml?cid=' + cid, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n plist = list()\r\n for i in 
soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n plist.append('%s\\t%s\\t%s\\t%s\\t%s' % (\r\n i.find_all('td')[0].text, i.find_all('td')[1].text, i.find_all('td')[2].text, i.find_all('td')[3].text,\r\n i.find_all('td')[4].text))\r\n return soup.title.text, plist", "def get_apartment_description(self, soup, apartment_dict):\n\n # Check for apartment description\n description_class = soup.find('div', class_='description')\n if not description_class:\n logging.warning(\"Failed to parse description\")\n return\n\n # Store apartment description\n description_text = ''\n for ptag in description_class.find_all('p'):\n description_text += ptag.text.strip() + ' '\n apartment_dict['description'] = description_text", "def get_city_job(html):\n soup = BeautifulSoup(html, 'html.parser')\n city = soup.find(class_=\"subtle loc\").get_text()\n if city:\n return city\n return None", "def get_club_tags(club):\n\n div = get_elements_with_class(club, 'div', '')[0]\n if len(div) < 1:\n return []\n\n tags = get_elements_with_class(div, 'span', 'tag is-info is-rounded')\n\n return [tag.text for tag in tags]", "def get_info(url):\r\n soup = make_request(url)\r\n\r\n #get press release title\r\n title_text = soup.find(\"h2\", \"con-title\").text.strip()\r\n title = title_text.partition('\\n')[0]\r\n\r\n #get press release content and date\r\n div = soup.find_all(\"div\") #find div tags\r\n for ele in div:\r\n for div2 in ele(\"div\",\"text-right\"):\r\n if \"發佈日期\" in div2.text:\r\n text = ele.text\r\n date = re.findall(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", div2.text)[0]\r\n break #prevents reiterating upwards to all div parents\r\n return date, title, text", "def _parse_biography(self):\n data = {}\n self.right_column = self.content.find(\"div\", class_=\"rechteSpalte60\")\n heading = self.right_column.find(\"h3\")\n # The page of the second president hides the details information\n # and displays a biography instead. By selecting the second div,\n # we get the hidden div containing the MPs details.\n if not heading:\n self.right_column = self.content.find_all(\"div\", class_=\"rechteSpalte60\")[1]\n data.update(self._parse_dob_job())\n data.update(self._parse_political_mandates())\n data.update(self._parse_political_posts())\n data.update(self._parse_work_history())\n data.update(self._parse_education())\n return data", "def _description(self, item):\n html_item = item['spider_response']\n html = BeautifulSoup(html_item.body, 'html5lib')\n description = \" \"\n try:\n description = self.description_mega(html)\n except Exception as exception: # pylint: disable=broad-except\n logging.exception(exception)\n return description", "def get_webpage_description(self, response):\n desc = response.xpath('//*/meta[@itemprop=\"description\"]/@content').extract_first()\n desc1 = response.xpath('//*/meta[@name=\"description\"]/@content').extract_first()\n desc_length = 50\n if desc1:\n return desc1[:desc_length].strip()\n else:\n if desc:\n return desc[:desc_length].strip()\n else:\n desc = response.xpath('//*/meta[@property=\"description\"]/@content').extract_first()\n if desc:\n return desc[:desc_length].strip()\n else:\n return \"\"", "def _parse_title(self, response):\n title_str = re.sub(\n r\"\\s+\", \" \", \" \".join(response.css(\".soi-container h2 *::text\").extract())\n ).strip()\n return re.sub(\n r\"(Illinois Commerce Commission|(?=Committee )Committee Meeting$)\",\n \"\",\n title_str,\n ).strip()" ]
[ "0.66733843", "0.64373165", "0.6436903", "0.63030756", "0.6302489", "0.62915045", "0.6276174", "0.6223921", "0.6218478", "0.6093889", "0.60505897", "0.59689254", "0.59336525", "0.5850197", "0.58073604", "0.5769218", "0.57675534", "0.5733685", "0.571832", "0.5702604", "0.5660665", "0.5659553", "0.565292", "0.5590436", "0.5576606", "0.5575361", "0.5560169", "0.55561715", "0.553443", "0.5515642" ]
0.7447845
0
Increment the club favourite amount by either 1 or -1.
def inc_dec_fav_count(clubname, amt):
    clubs = read_json()

    for i, club in enumerate(clubs):
        if club["name"] == clubname:
            print(clubs[i])
            clubs[i]["favourites"] += amt
            break  # Stop loop when the club is found
    write_json(clubs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def incr_circuit_fav_count(self, circuit_id):\n key = ':'.join(\n [CIRCUIT_NMBR_FAVS_1, \n str(circuit_id), \n CIRCUIT_NMBR_FAVS_2]\n ) \n self.RS.incr(key)", "def increase_count(self, number=1):\n self.count += number", "def like(self, n: int) -> None:\n\n # YOUR CODE HERE\n self.likes += 1", "def increase_score(self):\n self.score += 1", "def inc( self ):\n self.count += 1", "def inc(self):\n \n self.count += 1", "def increment(self, amount):\n pass", "def inc(self):\n self._value += 1", "def increment_counter(self) -> None:", "def favourite(self, favourite):\n\n self._favourite = favourite", "def update(self, result):\n self.visits += 1\n self.wins += result", "def update(self, result: int):\n self.wins += result\n self.visits += 1", "def update_collection_num(user_id, another_user_id, is_add):\n\n user = db_session.query(User).filter_by(user_id=user_id).scalar()\n another_user = db_session.query(User).filter_by(\n user_id=another_user_id).scalar()\n if is_add:\n user.follow_num += 1\n another_user.be_followed_num += 1\n else:\n user.follow_num -= 1\n another_user.be_followed_num -= 1\n db_session.commit()", "def Update(self, result):\n self.visits += 1\n self.wins += result", "def Update(self, result):\n self.visits += 1\n self.wins += result", "def increase_counter(self):\n self.values = self.values + 1", "def increment(cls, value):\r\n value.value += 1", "def update_likes(self):\n self.nb_likes = self.likes.count()\n self.save()", "def add_count(self):\n self.count += 1", "def increase_score(self, increase):\n if increase > 0:\n self.__score += increase", "def increment(self):\r\n return self.add(1)", "def player_increment(prev_player: int) -> int:\n return (prev_player + 1) % 5", "def _increment_turn(self):\r\n\r\n self.turn_number += 1", "def increment_number(self):\n # self.number += 1\n print('fuckwit')\n # print(self.number)", "def score_up(self, increment_by):\n self.user_score += increment_by", "def update_rank(self):\n self.__rank += 1", "def increment(self, inc):\n self.done += inc", "def inc_feature_count(self, f, cat):\n count = self.feature_count(f, cat)\n if count == 0:\n self.con.execute(\"insert into fc values ('%s','%s',1)\" % (f, cat))\n else:\n self.con.execute(\n \"update fc set count=%d where feature='%s' and category='%s'\" \n % (count+1, f, cat))\n self.con.commit()", "def incTurn(self):\n self.turnOn = (self.turnOn+1)%self.turns", "def count_favorite(self, obj):\n\n return obj.recipe_fav.count()" ]
[ "0.65473634", "0.6530351", "0.64911497", "0.6480591", "0.6480568", "0.6459364", "0.64345556", "0.6331934", "0.6280161", "0.6274599", "0.619152", "0.6143345", "0.61095226", "0.6072142", "0.6072142", "0.60591125", "0.6053504", "0.6045872", "0.6042607", "0.60403013", "0.6009018", "0.5965454", "0.59580946", "0.59406096", "0.591781", "0.59080815", "0.58644515", "0.5861842", "0.58495504", "0.5840856" ]
0.72904503
0
Add a new club if no club with that name already exists. If the club exists, then update its information.
def write_new_club(name, description, categories):
    clubs = read_json()

    if name in [club["name"] for club in clubs]:  # if club already exists, update it
        for i, club in enumerate(clubs):
            if name == club["name"]:
                updated_club = clubs[i]
                updated_club["name"] = name
                updated_club["description"] = description
                updated_club["categories"] = categories
                del clubs[i]
                clubs.append(updated_club)
                break  # stop when correct club is found
        write_json(clubs)
        return True
    else:
        club_json = {"name": name, "categories": categories, "description": description,
                     "favourites": 0}
        clubs.append(club_json)  # add new club if it doesn't exist
        write_json(clubs)

        existing_comments = get_all_comments()
        existing_comments[name] = []  # add the new club to the comments JSON file.

        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_club(self, club):\n sql = ''' INSERT INTO clubs(code_fede, dpt, nb_clubs, year) VALUES(?, ?, ?, ?) '''\n self.__cur.execute(sql, club)\n return self.__cur.lastrowid", "def add_club_comment(user, club, comment):\n with open('club_comments.json') as json_file:\n comments = json.load(json_file)\n if club in comments.keys():\n if comments[club] is None: # If there are no comments associated with the club Python returns None\n comments[club] = [user + \": \" + comment] \n else:\n comments[club].append(user + \": \" + comment)\n with open('club_comments.json', 'w') as outfile:\n json.dump(comments, outfile)\n return True \n else:\n return False # If the specified club name does not exist return False so an error can be specified to the api caller.", "def update(self):\n self.haveClub = len(self.clubs()) > 0", "def add_fake_club(self, points=0, name=\"fake_club\", email=\"[email protected]\"):\n\n server.clubs.append(\n {\n \"name\": f\"{name}\",\n \"email\": f\"{email}\",\n \"points\": f\"{points}\",\n }\n )\n self.clubs = server.clubs\n\n return len(server.clubs) - 1", "def create_club(username, name):\n\n # check username format\n if not username or not re.match(cfg['username_regex'], username):\n raise InvalidArgument(\"username\", username, \"expected format %s\" % repr(cfg['username_regex']))\n \n try:\n request = ceo_pb2.AddUser()\n request.type = ceo_pb2.AddUser.CLUB\n request.username = username\n request.realname = name\n\n out = remote.run_remote('adduser', request.SerializeToString())\n\n response = ceo_pb2.AddUserResponse()\n response.ParseFromString(out)\n\n if any(message.status != 0 for message in response.messages):\n raise MemberException('\\n'.join(message.message for message in response.messages))\n except remote.RemoteException, e:\n raise MemberException(e)\n except OSError, e:\n raise MemberException(e)", "def add_cohort(course_key, name):\r\n log.debug(\"Adding cohort %s to %s\", name, course_key)\r\n if CourseUserGroup.objects.filter(course_id=course_key,\r\n group_type=CourseUserGroup.COHORT,\r\n name=name).exists():\r\n raise ValueError(\"Can't create two cohorts with the same name\")\r\n\r\n return CourseUserGroup.objects.create(\r\n course_id=course_key,\r\n group_type=CourseUserGroup.COHORT,\r\n name=name\r\n )", "def validate_club(self, club):\n request = self.context['request']\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=request.user)\n if club not in profile.get_club_privileges():\n raise serializers.ValidationError(\n \"You are not authorized to create workshops for this club\")\n return club", "def add_division(tournament):\n division_data = json.loads(request.data)\n new_division = Division(\n name = division_data['name']\n )\n tournament.divisions.append(new_division)\n DB.session.add(new_division)\n DB.session.commit()\n return jsonify(to_dict(new_division))", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def add_album(self):\n item = self.clementine_albums.currentItem()\n albumname 
= item.text(0) if item else ''\n year = item.data(0, core.Qt.UserRole) if item else ''\n dlg = NewAlbumDialog(self, albumname, year).exec_()\n if dlg != qtw.QDialog.Accepted:\n return\n name, year, is_live = self.data\n if not item:\n result = self.clementine_albums.findItems(name,\n core.Qt.MatchFixedString, 0)\n if result:\n item = result[0]\n if not item:\n qtw.QMessageBox.information(self, self.appname, \"Album doesn't \"\n \"exist on the Clementine side\")\n return\n\n a_item = None\n results = self.albums_albums.findItems(name, core.Qt.MatchFixedString, 0)\n data = [build_album_name(x) for x in results]\n if results:\n selected, ok = qtw.QInputDialog.getItem(self, self.appname,\n 'Select Album', data,\n editable=False)\n if ok:\n a_item = results[data.index(selected)]\n if not a_item:\n a_item = qtw.QTreeWidgetItem([name, year, '0'])\n self.albums_albums.addTopLevelItem(a_item)\n tracklist = dmlc.list_tracks_for_album(dmlc.DB, self.c_artist,\n item.text(0))\n num = itertools.count(1)\n self.albums_to_save[self.c_artist].append(\n (name, year, 'X', is_live,\n [(next(num), x['title']) for x in tracklist if x['track'] > -1]))\n self.update_item(a_item, item)", "def add_league(inp_to_add, type_to_add, con, host, root, password):\r\n with con.cursor() as cur:\r\n if type_to_add == \"url\":\r\n league_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n league_site = inp_to_add\r\n elif type_to_add == \"country\":\r\n midterm_url = get_countries_dict()[inp_to_add]\r\n league_soup = BeautifulSoup(requests.get(midterm_url).text, 'html.parser')\r\n league_site = SOCCER_URL + league_soup.find('ul', class_=\"left-tree\").li.a[\"href\"]\r\n else:\r\n league_soup, league_site = get_first_search_result(\r\n SOCCER_URL + \"/search/competitions/?q=\" + inp_to_add)\r\n\r\n if league_soup:\r\n cur.execute(\"SELECT MAX(id) FROM leagues\")\r\n league_id = cur.fetchall()[0][0]\r\n\r\n addition = (league_soup.body.h1.text, league_soup.body.h2.text, league_site)\r\n cur.execute(\"\"\"INSERT INTO leagues (name, country, url) VALUES (%s, %s, %s)\"\"\", addition)\r\n con.commit()\r\n\r\n league_dict = {league_id: {'name': addition[0], 'url': addition[2]}}\r\n add_all_teams_and_players_in_league(league_dict, con, host, root, password)", "def add_sdcube(self, mapping, name=None):\n with h5py.File(self.filename, 'r') as h5_file:\n sdcubes = load_attribute(h5_file, 'sdcubes')\n if name in sdcubes:\n logging.error('A group with the name %s alread exists' % name)\n raise KeyError('A group with the name %s alread exists' % name)\n for key in sdcubes:\n if name.lower() == key:\n raise Warning('%s looks like %s!' % (name, key))\n raise KeyError('%s looks like %s!' 
% (name, key))\n if not name:\n name = str(len(sdcubes)) #name will be a number\n \n #TODO: until we decide if filenames are needed\n #if not filename:\n # filename = self.filename\n filename = self.filename\n\n\n SdCube(name, filename, mapping)\n with h5py.File(self.filename, 'a') as h5_file:\n sdcubes = load_attribute(h5_file, 'sdcubes')\n sdcubes[name] = filename\n store_attribute(h5_file, 'sdcubes', sdcubes)\n\n # FIXME: it's better for this method to return the added\n # subcube, rather than the name (which, it not already known\n # to the caller, can be accessed through the returned cube's\n # \"name\" attribute)\n\n return name", "def insert_champion_info(champion_id, key, name, title):\n conn = get_connect()\n cursor = conn.execute(\"SELECT * FROM championInfo where championId = ?\", [champion_id])\n result_list = cursor.fetchall()\n if len(result_list) == 0:\n conn.execute(\"INSERT INTO championInfo \\\n VALUES (?, ?, ?, ?)\", [champion_id, key, name, title])\n print(\"championInfo of \" + str(champion_id) + \" is inserted\")\n else:\n print(\"championInfo of \" + str(champion_id) + \" already exists!\")\n conn.commit()\n conn.close()\n return", "def add(self, database):\n if not database.session:\n logger.error(\"no database session\")\n return False\n\n id = database.session.query(Sport.id).filter(\n Sport.name == self.name).first()\n if id:\n # this sport already exists\n self.id = id[0]\n return False\n else:\n # create a new one and flush it immediately in order to update the id\n try:\n database.session.add(self)\n database.session.flush()\n except exc.SQLAlchemyError as e:\n logger.error(\"Database error: {}\".format(e.args))\n return False\n logger.info(\"Added new sport '{}' id {}\".format(self.name, self.id))\n return True", "def update_club_stats_for_club(club):\n LOG.info(\"Updating Club stats for {}\".format(club))\n s_entries = ClubStats.objects.filter(club=club)\n\n # A dictionary for Club Stats entries, keyed by the team id\n s_lookup = {}\n\n totals = None\n # Reset all the entries\n for stat in s_entries:\n stat.reset()\n if not stat.is_club_total():\n s_lookup[stat.team_id] = stat\n else:\n totals = stat\n\n # Get all match results against this club\n matches = Match.objects.results().filter(our_team__rivals=True, our_score__isnull=False,\n opp_score__isnull=False, opp_team__club=club).select_related('our_team', 'opp_team__club')\n\n # Update with match stats\n for match in matches:\n if not match.our_team_id in s_lookup:\n s_lookup[match.our_team_id] = ClubStats(\n team=match.our_team, club=club)\n s_lookup[match.our_team_id].add_match(match)\n\n # Update totals\n if totals is None:\n totals = ClubStats(club=club)\n for stat in s_lookup.values():\n totals.accumulate_stats(stat)\n totals.save()\n\n # Save all updated stats back to the database\n for _, value in s_lookup.items():\n value.save()\n\n return totals", "async def add(self, ctx, game):\n\n user = ctx.message.author\n\n if add(game, user.id):\n await self.bot.say(\"{}, {} was added to your library.\".format(user.mention, game))\n else:\n await self.bot.say(\"{}, you already have this game in your library.\".format(user.mention))", "def club_id(self, club_name):\r\n # UTF-8 comparison\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in 
soup.find('table', cellpadding=2).find_all('tr'):\r\n # Get teamid from the bets\r\n team1 = i.find('a')['title']\r\n team2 = i.find_all('a')[1]['title']\r\n if club_name == team1:\r\n return i.find('a')['href'].split('cid=')[1]\r\n elif club_name == team2:\r\n return i.find_all('a')[1]['href'].split('cid=')[1]\r\n return None", "def addMember(self, member_name):\n connection = self.sock\n message = \"member_add\".encode()\n connection.send(message)\n status_code = connection.recv(2)\n\n if status_code != FAILURE:\n print(\"Error\")\n return False\n\n message = member_name.encode()\n connection.send(message)\n result = connection.recv(2)\n if result == SUCCESS:\n return True\n else:\n return False", "def add_team(inp_to_add, type_to_add, host, root, password):\r\n team_name = \"\"\r\n\r\n if type_to_add == \"url\":\r\n team_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n team_site = inp_to_add\r\n else:\r\n team_soup, team_site = get_first_search_result(\r\n SOCCER_URL + \"/search/teams/?q=\" + inp_to_add)\r\n\r\n if team_soup:\r\n # Need to examine if league already exists, if not - add it. Then, get its LEAGUE_ID\r\n league_url = SOCCER_URL + team_soup.find('div', id=\"page_team_1_block_team_table_9-wrapper\").h2.a[\"href\"]\r\n find_league({league_url}, \"url\", host, root, password)\r\n\r\n team_name = team_soup.find(\"table\", class_=\"leaguetable sortable table\").tbody.find_all(\r\n 'tr', class_=[\"odd highlight team_rank\", \"even highlight team_rank\"])[0].find(\r\n 'td', class_=\"text team large-link\").a.text\r\n\r\n return team_name", "async def clublog(self, ctx, download=None):\n\n old_clubs = deepcopy(await self.clubs.getClubs())\n new_clubs = deepcopy(old_clubs)\n\n count = 0\n for clubkey in self.clubs.keysClubs():\n try:\n club = self.brawl.get_club(await self.clubs.getClubData(clubkey, 'tag'))\n except brawlstats.RequestError:\n print(\"CLANLOG: Cannot reach Brawl Stars Servers.\")\n return\n\n count += club.members_count\n one_club = {}\n for member in club.members:\n tag = member.tag\n one_club[tag] = {}\n one_club[tag][\"tag\"] = tag\n one_club[tag][\"name\"] = member.name\n if download is not None:\n await self.clubs.addMember(clubkey, member.name, tag)\n new_clubs[clubkey]['members'] = one_club\n\n if download is not None:\n return\n\n if self.bs_last_count != count:\n self.update_bs_member_log()\n current_time = get_time()\n self.bs_member_log[str(current_time)] = count\n self.bs_last_count = count\n\n saved_times = list(self.bs_member_log.keys())\n for time in saved_times:\n if (current_time - float(time)) > 2678400: # one month\n self.bs_member_log.pop(time, None)\n self.save_bs_member_log()\n\n server = ctx.message.server\n\n for clubkey in old_clubs.keys():\n for member in old_clubs[clubkey][\"members\"].keys():\n if member not in new_clubs[clubkey][\"members\"]:\n memberName = old_clubs[clubkey][\"members\"][member][\"name\"]\n memberTag = old_clubs[clubkey][\"members\"][member][\"tag\"]\n await self.clubs.delMember(clubkey, memberTag)\n\n title = \"{} (#{})\".format(await self.tags.formatName(memberName), memberTag)\n desc = \"left **{}**\".format(old_clubs[clubkey][\"name\"])\n embed_left = discord.Embed(title=title,\n url=\"https://brawlstats.com/profile/{}\".format(memberTag),\n description=desc,\n color=0xff0000)\n\n if server.id == \"515502772926414933\":\n channel = await self.clubs.getClubData(clubkey, 'log_channel')\n if channel is not None:\n try:\n await self.bot.send_message(discord.Object(id=channel), embed=embed_left)\n 
except discord.errors.NotFound:\n await self.bot.say(\"<#{}> NOT FOUND\".format(channel))\n except discord.errors.Forbidden:\n await self.bot.say(\"No Permission to send messages in <#{}>\".format(channel))\n\n await self.bot.say(embed=embed_left)\n\n for clubkey in self.clubs.keysClubs():\n for member in new_clubs[clubkey][\"members\"].keys():\n if member not in old_clubs[clubkey][\"members\"]:\n memberName = new_clubs[clubkey][\"members\"][member][\"name\"]\n memberTag = new_clubs[clubkey][\"members\"][member][\"tag\"]\n await self.clubs.addMember(clubkey, memberName, memberTag)\n\n title = \"{} (#{})\".format(await self.tags.formatName(memberName), memberTag)\n desc = \"joined **{}**\".format(old_clubs[clubkey][\"name\"])\n\n embed_join = discord.Embed(title=title,\n url=\"https://brawlstats.com/profile/{}\".format(memberTag),\n description=desc,\n color=0x00ff40)\n\n if server.id == \"515502772926414933\":\n channel = await self.clubs.getClubData(clubkey, 'log_channel')\n if channel is not None:\n try:\n await self.bot.send_message(discord.Object(id=channel), embed=embed_join)\n except discord.errors.NotFound:\n await self.bot.say(\"<#{}> NOT FOUND\".format(channel))\n except discord.errors.Forbidden:\n await self.bot.say(\"No Permission to send messages in <#{}>\".format(channel))\n\n await self.bot.say(embed=embed_join)", "def add_user_to_course_cohort(cohort_name, course_id, user):\n if cohort_name is not None:\n cohort = get_cohort_by_name(course_id, cohort_name)\n try:\n add_user_to_cohort(cohort, user)\n except ValueError:\n # user already in cohort, probably because they were un-enrolled and re-enrolled\n logger.exception('Cohort re-addition')", "async def create_guild(self, name, guild_id):\n\n await self.db[str(guild_id)].insert_one({'name': name})", "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)", "def addTeam(teaminfo):\r\n team, auto, rc_comp, spirit_comp, video_comp = teaminfo\r\n if team_exists(team): # Team already exists\r\n print(\"Team\", team, \"already exists.\")\r\n else:\r\n with sqlite3.connect(database_file) as conn:\r\n #(teamname TEXT, autonomous TEXT, rc TEXT, spirit INT, video INT)\r\n conn.execute(\"INSERT INTO scores(teamname, autonomous, rc, spirit, video)\\\r\n VALUES('{0}', '{1}', '{2}', '{3}', '{4}');\".format(team, auto, rc_comp, spirit_comp, video_comp))", "def add():\n prev_courses = Course._file.read_db()\n course_name = input(\"Please, type course name >\")\n # check course for uniqueness/ instantiating blank class with one attribute\n c = Course(course_name)\n if c.is_course_exists():\n print(\"{} is already exists\".format(course_name))\n return\n\n prev_courses[\"courses\"].append({\n \"course_name\": course_name,\n \"teacher\": input(\"Please, type teacher's email >\"),\n \"total_place\": int(input(\"Please, type total enrolled number >\")),\n \"students\": []\n })\n Course._file.write_db(prev_courses)\n print(\"New course - {} is added\".format(course_name))\n return", "def test_sad_purchasePlaces_wrong_club(self):\n\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": 1,\n \"club\": \"fake_club_name\",\n \"competition\": self.competitions[0][\"name\"],\n },\n )\n\n assert rv.status_code in [404]\n assert b\"The provided club is invalid\" in rv.data", "async def __add(self, ctx, name: discord.Member=None):\n server = ctx.message.server\n author = 
ctx.message.author\n if name is None:\n name = author\n if server.id not in self.db:\n self.db[server.id] = {}\n if \"bookkeeper\" not in self.db[server.id]:\n self.db[server.id][\"bookkeeper\"] = []\n if name.id in self.db[server.id][\"bookkeeper\"]:\n await self.bot.say(\"{} is already registered as a bookkeeper\".format(name.display_name))\n else:\n self.db[server.id][\"bookkeeper\"].append(name.id)\n self.save_db()\n await self.bot.say(\"{} has been registered as a bookkeeper.\".format(name.display_name))", "def get_club_info(url):\n\tbase_url = \"http://fas-mini-sites.fas.harvard.edu/osl/grouplist\"\n\tnum = 0\n\n\tclub_url = base_url + url\n\n\tcategoryArr= []\n\n\tr = rq.get(club_url)\n\tsoup = BeautifulSoup(r.text)\n\tinfoClub = [ '' for i in range(9) ]\n\t#0: clubid\n\tclubid = url.split(\"=\")[-1]\n\tinfoClub[0] = clubid\n\t#1: clubname\n\tinfoClub[1] = soup.find(\"h2\").text\n\t \n\t# info = soup.p.get_text()\n\tinfo = soup.p.get_text().encode('ascii','ignore')\n\t#2: club description\n\tinfoClub[2] = info\n\n\tstuff = soup.ul\n\n\tstuffArray =[]\n\n\tstuffArray.append(stuff.li)\n\n\tcount = 0\n\tfor more in stuff.li.next_siblings:\n\t if (count%2 == 1):\n\t stuffArray.append(more)\n\t count +=1\n\n\t#info New: categories do .a trick\n\n\tcatRaw = BeautifulSoup(str(stuffArray[0]))\n\tcats = catRaw.find_all('a')\n\n\tfor cat in cats:\n\t catStr = []\n\t tempCat = str(cat.get('href'))\n\t catStr.append(clubid)\n\t catStr.append(tempCat[18:])\n\t categoryArr.append(catStr)\n\n\t#info 3: number of members\n\tmemStr = (str(stuffArray[1]))[49:-10]\n\n\t# print memStr\n\tif memStr == '1-9':\n\t memStr = 0\n\telif memStr == '10-25':\n\t memStr = 1\n\telif memStr == '26-50':\n\t memStr = 2\n\telif memStr == '76-100':\n\t memStr =3\n\telse:\n\t memStr = 4\n\t# print memStr\n\n\tinfoClub[3] = str(memStr)\n\n\t#inf 4: involvement\n\tinvolvementStr = str(stuffArray[2])\n\tinfoClub[4] = involvementStr[43:-10]\n\n\t#info 5: group email\n\temailRaw = BeautifulSoup(str(stuffArray[3]))\n\temail = emailRaw.a.get('href')\n\tinfoClub[5] = str(email)\n\n\t#info 6: group website\n\twebRaw = BeautifulSoup(str(stuffArray[4]))\n\tweb = webRaw.a.get('href')\n\tinfoClub[6] = str(web)\n\n\t#info 7: Mailing address\n\tmailingRaw = BeautifulSoup(str(stuffArray[5]))\n\tmail = mailingRaw.ul\n\n\tmailStr = (str(mail.li))[4:-5] + ','\n\n\tcheck = 0\n\tfor line in mail.li.next_siblings:\n\t check +=1\n\t if (check % 2 == 0):\n\t mailStr += (str(line))[4:-5]+ ','\n\n\tmailStr = mailStr[:-1]\n\tif (num != 204):\n\t mailStr.encode('ascii','ignore')\n\n\t if len(mailStr) > 255:\n\t print 'Error: mailing address too long'\n\n\t infoClub[7] = mailStr\n\telse:\n\t infoClubs[7] = \"hardcode\"\n\n\n\t#info 8: month of election\n\tstring1 = str(stuffArray[6])\n\tinfoClub[8] = string1[58:-10]\n\t\n\tprint \"Got all info of\", infoClub[0], infoClub[1]\n\n\treturn infoClub, categoryArr", "def test_add_category_existing_name(self):\n category = json.dumps({\n 'name': 'Asian',\n })\n self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n response = self.client.post('/category', data=category,\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 409)\n self.assertIn('Name Asian exists', response.data.decode())", "def add_to_cluster(self, item: str, c_id: Optional[str]) -> None:\n if item in self._clusters.keys(): # Check if conflicting add\n assert self._clusters[item] == c_id\n assert c_id is None or c_id in self._clusters.values() # Cluster already exists\n 
self._clusters[item] = c_id\n self.store()" ]
[ "0.6627244", "0.64997935", "0.62884676", "0.5672625", "0.5598938", "0.54096884", "0.5290357", "0.5247473", "0.5242618", "0.50834304", "0.50773954", "0.50768083", "0.50752026", "0.5064006", "0.5048289", "0.50311404", "0.50113213", "0.5007416", "0.50011957", "0.49295354", "0.49208528", "0.49106064", "0.4903292", "0.4891729", "0.4883058", "0.4870605", "0.4860415", "0.48393893", "0.4830003", "0.48233637" ]
0.68238854
0
Adds a favourites field to the json of scraped data.
def add_favourites_field():
    existing = read_json()
    if 'favourites' not in existing[0].keys():  # if the field has not already been added, add it.
        for club in existing:
            club['favourites'] = 0
        write_json(existing)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def favourite(self, favourite):\n\n self._favourite = favourite", "def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)", "def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")", "def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")", "def mark_favorite(request, object_id):\n feed_item = get_object_or_404(FeedItem, id=object_id)\n fav_item, is_new = FavoriteItem.objects.get_or_create(feed_item=feed_item)\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Marked as favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))", "def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')", "def addToFavorites(self, shortName, absPath):\n logger.debug(\"Func: addToFavorites\")\n\n # old Name userFavoritesAdd\n bookmarksData = self.loadFavorites()\n bookmarksData.append([shortName, absPath])\n self._dumpJson(bookmarksData, self._pathsDict[\"bookmarksFile\"])\n return bookmarksData", "def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... 
url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out", "def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]", "def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)", "def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))", "def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)", "def auto_fav(q, count=5, result_type=\"recent\"):\n\n result = search_tweets(q, count, result_type)\n\n for tweet in result[\"statuses\"]:\n try:\n # don't favorite your own tweets\n if tweet[\"user\"][\"screen_name\"] == TWITTER_HANDLE:\n continue\n\n result = t.favorites.create(_id=tweet[\"id\"])\n print(\"favorited: %s\" % (result[\"text\"].encode(\"utf-8\")))\n\n # when you have already favorited a tweet, this error is thrown\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))", "def update_favorite_things():\n data = request.data\n favorite_things = json.loads(data)\n print(favorite_things)\n connection = mongo_connect()\n if(favorite_things[\"action\"] == \"add\"):\n connection[\"users\"].update_one(\n {\"_id\": ObjectId(session[\"_id\"])},\n {\"$push\": {\n favorite_things[\"type\"]: ObjectId(favorite_things[\"item_id\"])\n }\n }\n )\n else:\n connection[\"users\"].update_one(\n {\"_id\": ObjectId(session[\"_id\"])},\n {\"$pull\":\n {\n favorite_things[\"type\"]:\n ObjectId(favorite_things[\"item_id\"])\n }\n }\n )\n resp = jsonify(success=True)\n return resp", "def cmd_account_favorites(client, args):\n account_favorites = client.get_account_favorites(args.username)\n data = [item.__dict__ for item in account_favorites]\n generate_output({'account_favorites': data}, args.output_file)", "def add_favorite(self, id):\n path = self._get_path('alter_favorite').format(id=id)\n \n return self._clean_return(self._PUT(path))", "def inc_dec_fav_count(clubname, amt):\n clubs = read_json()\n\n for i, club in enumerate(clubs):\n if club[\"name\"] == clubname:\n print(clubs[i])\n clubs[i][\"favourites\"] += amt\n break # Stop loop when the club is found\n write_json(clubs)", "def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)", "def getUserFavorites(request, userid):\n try:\n User.objects.get(id=userid)\n favList = list(Favorite.objects.filter(user=userid).values())\n favInfoDict = {}\n num = 0\n\n for fav in favList:\n try:\n favItem = 
Item.objects.get(id=fav.get(\"item_id\")) \n favInfoDict[num] = model_to_dict(favItem)\n num = num + 1\n \n except Item.DoesNotExist:\n favInfoDict[\"favorite\"] = \"doesnotexist\"\n\n return JsonResponse(favInfoDict)\n\n except User.DoesNotExist:\n fail = {\n \"user\":\"doesnotexist\"\n }\n return JsonResponse(fail)", "def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)", "def SetNewFav(self, newFav):\n self.favouriteFood = newFav", "def set_favorite(request):\n company_id = request.data.get('id')\n company = Company.objects.get(id=company_id)\n\n request.user.profile.companies.add(company)\n return Response({'favorite': True})", "def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()", "def favorite():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify the token\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified == False: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # handles the get request\n if request.method == \"GET\":\n favorites = read_criteria(Favorite,{\"user_id\":login_session[\"user_id\"]},session,\"m\") or []\n favorites_room_json = [room_json(favorite.room, session,app.config[\"OFFLINE_TESTING\"], login_session) for favorite in favorites]\n return generate_response(elem={\"favorites\":favorites_room_json})\n # part2: check json, handle POST request\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_GET_FAV_NO_JSON)\n if checked_json != True: return response\n # verify room id type, with strict mode\n requested_json[\"user_id\"] = login_session[\"user_id\"]\n correct_format,valid_update_pairs, response = process_request_json(Favorite,requested_json, True, access_mode=\"read\",nondb_type_map={\"action\":str})\n if correct_format == False: \n return response\n room = get_row_if_exists(Room, session, ** {\"id\": requested_json[\"room_id\"]})\n user = get_row_if_exists(User, session, ** {\"id\": login_session[\"user_id\"]})\n # if the room id in the request doesn't fit any entry in db, return error message\n if room is None:\n response = generate_message(MESSAGE_FAV_ROOM_NOT_EXIST,404)\n return response\n if requested_json[\"action\"] == \"add\":\n # the add favorite already handle duplicates add\n # it treats multiple adds as one add and every duplicate add afterwards is counted as success\n add_favorite(room,user, session)\n response = generate_message(MESSAGE_POST_FAV_ADD_SUCCESS,201)\n return response\n elif requested_json[\"action\"] == \"delete\":\n # the delete favorite already handle duplicates delete\n # it treats multiple delete as one delete and every duplicate delete afterwards is counted as success\n remove_entry(Favorite,requested_json[\"room_id\"], session)\n response = generate_message(MESSAGE_POST_FAV_DEL_SUCCESS,200)\n return response\n else: # method not supported\n response = 
generate_message(MESSAGE_POST_FAV_METHOD_NOT_SUPPORTED,405)\n return response", "def add_favorite(self, pk: int) -> Response:\n try:\n TagDAO.favorite_tag_by_id_for_current_user(pk)\n return self.response(200, result=\"OK\")\n except TagNotFoundError:\n return self.response_404()\n except MissingUserContextException as ex:\n return self.response_422(message=str(ex))", "async def create(self, favorite: Favorite) -> Favorite:", "def favorite(request, movie_id):\n\n movie = get_object_or_404(Movie, pk=movie_id)\n try:\n if movie.liked:\n movie.liked = False\n else:\n movie.liked = True\n movie.save()\n except (KeyError, Movie.DoesNotExist):\n return JsonResponse({'success': False})\n else:\n return JsonResponse({'success': True})", "def add_favorites(request):\n try:\n db_logger.info(\"DEBUT cart/add_favorites\")\n data: dict = json.loads(request.body) # get json body\n # get cart\n cart: Cart = Cart(request)\n db_logger.info(f\"cart => {cart}\")\n\n # adding articles to cart\n for item in data:\n qs_product: [Article] = Article.objects.filter(id=int(item['article']))\n add_to_cart(qs_product, int(item['quantity']), cart)\n db_logger.info(f\"qs_product => {qs_product}\")\n\n db_logger.info(\"FIN cart/add_favorites\")\n return JsonResponse({\"msg\": \"success\"}, status=200)\n\n except Exception as e:\n db_logger.exception(f\"erreur cart/cart_add => {e}\")\n return JsonResponse({\"msg\": \"error\"}, status=500)", "def favourites_read(self, data, sesh):\n\n\t\t# Fetch the favourites for the thrower\n\t\tlFavourites = Favourites.get(sesh['thrower']['_id'], raw=['ids'])\n\n\t\t# If there's none\n\t\tif not lFavourites:\n\t\t\treturn Services.Effect([])\n\n\t\t# Look up all the throwers using the IDs\n\t\tlThrowers = Thrower.get(lFavourites['ids'], raw=['_id', 'alias'])\n\n\t\t# Return what's found\n\t\treturn Services.Effect(lThrowers)", "def get_favorite(self, obj):\n article_fav_users = obj.favorite.all()\n return self.fetch_usernames(article_fav_users)" ]
[ "0.65756124", "0.6529356", "0.6501094", "0.62727886", "0.61769885", "0.6144672", "0.61340266", "0.60702753", "0.6036172", "0.6008245", "0.6002118", "0.59143424", "0.5912242", "0.58768296", "0.58584183", "0.58416903", "0.58340627", "0.5818007", "0.5809916", "0.5793181", "0.57400817", "0.5688812", "0.56780815", "0.5638019", "0.5635724", "0.56309646", "0.5585897", "0.55839545", "0.55794054", "0.5556132" ]
0.8033423
0
Create the file that stores comments with all of the clubs that are in the clubs json file.
def create_comment_file():
    clubs = read_json()
    comment_dict = {}
    for club in clubs:
        comment_dict[club.name] = []
    with open('club_comments.json', 'w') as outfile:
        json.dump(comment_dict, outfile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_club_comment(user, club, comment):\n with open('club_comments.json') as json_file:\n comments = json.load(json_file)\n if club in comments.keys():\n if comments[club] is None: # If there are no comments associated with the club Python returns None\n comments[club] = [user + \": \" + comment] \n else:\n comments[club].append(user + \": \" + comment)\n with open('club_comments.json', 'w') as outfile:\n json.dump(comments, outfile)\n return True \n else:\n return False # If the specified club name does not exist return False so an error can be specified to the api caller.", "def write_new_club(name, description, categories):\n clubs = read_json()\n\n if name in [club[\"name\"] for club in clubs]: # if club already exists, update it\n\n for i, club in enumerate(clubs):\n if name == club[\"name\"]:\n updated_club = clubs[i]\n updated_club[\"name\"] = name\n updated_club[\"description\"] = description\n updated_club[\"categories\"] = categories\n del clubs[i]\n clubs.append(updated_club)\n break # stop when correct club is found\n\n write_json(clubs)\n return True\n else: \n club_json = {\"name\": name, \"categories\": categories, \"description\": description,\n \"favourites\": 0}\n clubs.append(club_json) # add new club if it doesn't exist\n write_json(clubs)\n\n existing_comments = get_all_comments()\n existing_comments[name] = [] # add the new club to the comments JSON file.\n\n return False", "def create_chceckfile(artist_list):\n with open(\"Udemy_Course/Object_Oriented_Programing_and_Classes/OOP_Song_Class/checkfile.txt\", \"w\") as checkfile:\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format\n (new_artist, new_album, new_song), file=checkfile)", "def write_coco_json(filepath, dataset_dicts, name_to_id, **kwargs):\n info = {\n \"description\": kwargs.get(\"description\", \"\"),\n \"url\": kwargs.get(\"url\", \"\"),\n \"version\": kwargs.get(\"version\", \"0.0\"),\n \"year\": kwargs.get(\"year\", \"2017\"),\n \"contributor\": kwargs.get(\"contributor\", \"\"),\n \"date_created\": kwargs.get(\"date_created\", \"2017/01/01\"),\n }\n\n licenses = {\n \"url\": \"closed\",\n \"id\": 0,\n \"name\": \"closed\",\n }\n\n images, annotations = [], []\n annotation_id = 1\n for record in dataset_dicts:\n images.append({\n \"id\": record[\"image_id\"],\n \"width\": record[\"width\"],\n \"height\": record[\"height\"],\n \"file_name\": record[\"file_name\"]\n })\n\n for annotation in record[\"annotations\"]:\n x0, y0, x1, y1 = annotation[\"bbox\"]\n annotations.append({\n \"id\": annotation_id,\n \"category_id\": annotation[\"category_id\"],\n \"bbox\": [x0, y0, x1 - x0, y1 - y0],\n \"iscrowd\": annotation[\"iscrowd\"],\n \"image_id\": record[\"image_id\"],\n \"area\": (x1 - x0) * (y1 - y0),\n })\n annotation_id += 1\n\n categories = [{\n \"id\": category_id,\n \"name\": \"{}\".format(category_name),\n \"supercategory\": \"\"\n } for category_name, category_id in name_to_id.items()]\n\n coco_dict = {\n \"info\": info,\n \"licenses\": licenses,\n \"images\": images,\n \"annotations\": annotations,\n \"categories\": categories,\n }\n\n with filepath.open(mode=\"w\") as file_handle:\n json.dump(coco_dict, file_handle)", "def write_json(toWrite):\n with open('clubs.json', 'w+') as outfile:\n json.dump(toWrite, outfile)", "def main():\n os.makedirs(\"../json-data\", exist_ok=True)\n # num_docs = 1005\n num_docs = int(sys.argv[1])\n for answerno in range(num_docs):\n print('Creating 
document', answerno, 'of', num_docs)\n basename = \"../json-data/chunck_%s\" % uuid.uuid4()\n tempname = basename + '.temp.gz'\n longtermname = basename + '.json.gz'\n\n # We compress with gzip.\n # It's relatively fast compression.\n # We could compress with bzip2 or zlib instead if we have the CPU time available.\n # We could do bits and bytes, but that's harder to debug, and only worth it if there's a LOT of data to store.\n # We could eliminate all unanswered responses, but that is a little prone to surprises.\n # We also have the option of using bson instead of json.\n with gzip.open(tempname, \"w\") as answerfile:\n row = {\"pk\": \"%d\" % answerno}\n for carvar in constants.carvars:\n row[carvar] = random.choice(constants.carbrands)\n for carvar in constants.mrcarvars:\n for carbrand in constants.carbrands:\n row[\"%s.%s\" % (carvar, carbrand)] = random.choice(constants.answers)\n for singvar in constants.singervars:\n row[singvar] = random.choice(constants.singers)\n for singvar in constants.mrsingervars:\n for singer in constants.singers:\n row[\"%s.%s\" % (singvar, singer)] = random.choice(constants.answers)\n string = json.dumps(row)\n answerfile.write(string.encode('UTF-8'))\n os.rename(tempname, longtermname)", "def write_data_to_file():\n with open(\"./data/graphql-data.js\", \"w\") as file:\n #beginning\n file.write('module.exports = {\\n')\n \n #books\n file.write(' \"Book\": {\\n')\n for book in books:\n file.write(f' \"{book_ids[book]}\": {{ id: \"{book_ids[book]}\", title: \"{book}\", genre: \"{book_genres[book]}\" , authors: [')\n for author in books[book]:\n file.write(f'{{\"id\": \"{authors[author]}\"}}, ')\n file.write(']},\\n')\n file.write(' },\\n')\n \n #authors\n file.write(' \"Author\": {\\n')\n for author in authors:\n file.write(f' \"{authors[author]}\": {{ id: \"{authors[author]}\", name: \"{author}\"')\n if author in known_relations:\n file.write(', \"knows\": [')\n for associate in known_relations[author]:\n file.write(f' {{ id: \"{authors[associate]}\" }},')\n file.write(' ]')\n else:\n #some small chance that the authors's knows value is an empty list or is null\n d20 = random.randint(1, 20)\n if d20 < 10:\n file.write(', \"knows\": []')\n else:\n file.write(', \"knows\": null')\n d20 = random.randint(1, 20)\n if d20 >= 3:\n file.write(f', \"age\": {random.randint(15, 100)}')\n elif d20 == 1:\n #5% chance that age does not exist\n pass\n elif d20 == 2: \n #5% chance that age is null\n file.write(', \"age\": null')\n file.write(', description: \"\" },\\n')\n file.write(' },\\n')\n\n #readers\n file.write(' \"Reader\": {\\n')\n for reader in readers:\n file.write(f' \"{readers[reader]}\": {{ id: \"{readers[reader]}\", name: \"{reader}\"')\n #reader knows\n if readers[reader] in reader_knows:\n file.write(f', \"knows\": [')\n for associate in reader_knows[readers[reader]]:\n file.write(f' {{ id: \"{associate}\" }},')\n file.write(' ]')\n else:\n #some small chance that the reader's knows value is an empty list or null\n d20 = random.randint(1, 20)\n if d20 < 10:\n file.write(', \"knows\": []')\n else:\n file.write(', \"knows\": null')\n #reader's favourite book\n if readers[reader] in reader_favourite_book:\n file.write(f', \"favourite_book\": {{id: \"{reader_favourite_book[readers[reader]]}\"}}')\n else:\n #some small chance that the reader's favourite book value is an empty string\n #d20 = random.randint(1, 20)\n #if d20 < 5:\n file.write(', \"favourite_book\": null')\n d20 = random.randint(1, 20)\n if d20 > 4:\n file.write(f', \"age\": {random.randint(5, 
100)}')\n else: \n file.write(', \"age\": null')\n file.write(', },\\n')\n file.write(' },\\n')\n\n #movies\n file.write(' \"Movie\": {\\n')\n for movie in movies:\n file.write(f' \"{movie_ids[movie]}\": {{ id: \"{movie_ids[movie]}\", title: \"{movie}\", genre: \"{movie_genres[movie]}\", producer: {{id: \"{movie_producers[movie]}\"}}, director: \"{generate_director_name()}\"}},\\n')\n file.write(' },\\n')\n\n #end\n file.write('}\\n')\n #make variable exportable\n #file.write('export { books };')\n file.close()", "def process_reddit_comment_file(f,\n output_folder):\n ## Output File\n if output_folder is not None:\n fname = os.path.basename(f).replace(\"comments.json\",\"processed.comments.json\")\n if not fname.endswith(\".gz\"):\n fname = fname + \".gz\"\n output_folder = output_folder.rstrip(\"/\")\n fname = f\"{output_folder}/{fname}\"\n if os.path.exists(fname):\n return fname\n ## Load Comment Data\n if f.endswith(\".gz\"):\n file_opener = gzip.open\n else:\n file_opener = open\n try:\n with file_opener(f, \"r\") as the_file:\n comment_data = json.load(the_file)\n except json.JSONDecodeError:\n with file_opener(f, \"r\") as the_file:\n comment_data = []\n for line in the_file:\n comment_data.append(json.loads(line))\n ## Check Data\n if len(comment_data) == 0:\n return None\n ## Transform into DataFrame\n comment_data = pd.DataFrame(comment_data).dropna(subset=[\"body\"])\n ## Tokenize Text\n comment_data[\"text_tokenized\"] = comment_data[\"body\"].map(tokenizer.tokenize)\n ## Add Meta\n comment_data[\"source\"] = f\n comment_data[\"entity_type\"] = \"comment\"\n comment_data[\"date_processed_utc\"] = int(datetime.utcnow().timestamp())\n ## Rename Columns and Subset\n comment_data.rename(columns = DB_SCHEMA[\"reddit\"][\"comment\"], inplace=True)\n comment_data = comment_data[list(DB_SCHEMA[\"reddit\"][\"comment\"].values())]\n ## Format Into JSON\n formatted_data = comment_data.apply(lambda row: row.to_json(), axis=1).tolist()\n formatted_data = list(map(lambda x: json.loads(x), formatted_data))\n ## Dump Processed Data (or return)\n if output_folder is None:\n return formatted_data\n else:\n with gzip.open(fname, \"wt\", encoding=\"utf-8\") as the_file:\n json.dump(formatted_data, the_file)\n return fname", "def get_commentary_for_book_chapters(book_file_name):\n if not os.path.exists(challoner_store):\n os.mkdir(challoner_store)\n with open(book_file_name, \"r+\") as rh:\n book = json.load(rh)\n chapter_text = {}\n\n for name, chapters_dictionary in book.items():\n \n for chap, location in chapters_dictionary.items():\n norm = normalize_filename(\"{}_{}\".format(name, chap))\n \n outfile = \"{}/{}.json\".format(challoner_store, norm)\n \n if os.path.exists(outfile):\n continue\n else:\n chapter_text[name + \"__\" + chap] = get_commentary_for_chapter(location)\n with open(outfile, \"w+\") as wh:\n json.dump(chapter_text, wh)\n chapter_text = {}", "def create_file(self):\n for data_element in self.data:\n title = data_element['title']\n anchor = data_element['href']\n example = data_element['example']\n content = data_element['content']\n if example:\n abstract = '<section class=\"prog__container\">{}<br>{}</section>'.format(content, example)\n\n list_of_data = [\n title, # api title\n 'A', # type is article\n '', # no redirect data\n '', # ignore\n '', # no categories\n '', # ignore\n '', # no related topics\n '', # ignore\n '', # no external link\n '', # no disambiguation\n '', # images\n abstract, # abstract\n anchor # url to doc\n ]\n 
self.output_file.write('{}\\n'.format('\\t'.join(list_of_data)))", "def main(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n\n course_dict = {}\n course_dict['course_id'] = str(os.path.split(filename.strip('/'))[-1])\n course_dict['blocks'] = build_course_map(data)\n\n filename = '%s' % course_dict['course_id']\n filepath = os.path.join('../input/', filename)\n\n with open(filepath, 'w') as outfile:\n json.dump(course_dict, outfile, indent=4)", "def read_chumps_from_file(self):\n for chump in self.comment_store.get_stored_chumps():\n self.add_chump(chump)", "def dump_all_json():\n\n # Set up process logging.\n # Existence of error log file can tell us if errors occur.\n script_logging.clear_status_log()\n script_logging.clear_error_log()\n\n # Pull list of courses\n courses = canvas_data.pull_courses()\n\n # If there are course ID parameters, just load the specified courses\n if len(sys.argv) > 1:\n course_id_list = map(int, sys.argv[1:])\n courses = [course for course in courses if course['id'] in course_id_list]\n\n # course_id = int(sys.argv[1])\n # courses = [course for course in courses if course['id'] == course_id]\n\n script_logging.log_status('Storing courses JSON to %s' % (COURSES_FILE_NAME))\n with open(COURSES_FILE_NAME, 'w') as f:\n json.dump(courses, f, indent = 2)\n \n for course in courses:\n course_id = course['id']\n\n # Pull students in each course\n students = canvas_data.pull_course_students(course_id)\n dump_json(students, STUDENTS_FILE_NAME, course_id, \"course students\")\n\n # Pull users for each course.\n # We'll need this to look up comment submitters.\n users = canvas_data.pull_course_users(course_id)\n dump_json(users, USERS_FILE_NAME, course_id, \"course users\")\n\n # pull assignments for each course\n assignments = canvas_data.pull_assignments(course_id)\n dump_json(assignments, ASSIGNMENTS_FILE_NAME, course_id, 'course assignments')\n\n # pull submissions for each assignment\n for assignment in assignments:\n assignment_id = assignment[\"id\"]\n submissions = canvas_data.pull_submissions_with_comments(course_id, assignment_id)\n dump_json(submissions, SUBMISSIONS_FILE_NAME, assignment_id, 'assignment submissions')", "def save_comments(self, videoId):\n comm_obj = self.get_comment_obj(videoId)# need to get the id \n\n file_exists = os.path.isfile(self.path)\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n if not file_exists:\n writer_top.writerow(['etag'] + ['videoId'] + ['commentId'] + ['text'] + ['author'] + ['like'] + ['time'])\n f.close()\n\n f = open(self.path, 'a', encoding='utf-8-sig')\n writer_top = csv.writer(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n\n for i in comm_obj['items']:\n\n result_row = [[i['etag'], i['snippet']['videoId'], i['snippet']['topLevelComment']['id'], i['snippet']['topLevelComment']['snippet']['textDisplay'], i['snippet']['topLevelComment']['snippet']['authorDisplayName'], i['snippet']['topLevelComment']['snippet']['likeCount'], i['snippet']['topLevelComment']['snippet']['publishedAt']]]\n writer_top.writerows(result_row)\n f.close()", "def create_checkfile(artists):\n with open('./data/checkfile.txt', 'w') as checkfile:\n for new_artist in artists:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n print('{0.name}\\t{1.name}\\t{1.year}\\t{2.title}'.format(new_artist, new_album, new_song), file=checkfile)", "def create_file(output_json):\n folder = \"data/\"\n filename = 
datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)", "def create_checkfile(artist_list):\n\n print(\"Creating checkfile...\")\n\n with open(\"checkfile.txt\", \"w\") as checkfile:\n\n for artist in artist_list:\n print(artist.name)\n for album in artist.albums:\n print(\"\\t\", album.name, album.year)\n for song in album.tracks:\n print(\"\\t\\t\", song.title)\n print(f\"{artist.name}\\t{album.name}\\t{album.year}\\t{song.title}\", file=checkfile)\n\n print(\"Checkfile created.\")\n print()\n print(\"=\" * 40)\n print()", "def initialize_descriptive_json(json_filename,wk_dir,model_dir,obs_dir):\n output = {'provenance':{},'data':{},'metrics':{},'plots':{},'index': 'index.html','html':'index.html'}\n log_path = wk_dir + '/asop_coherence.log.txt'\n output['provenance'] = {'environment': get_env(),\n 'modeldata': model_dir,\n 'obsdata': obs_dir,\n 'log': log_path}\n with open(json_filename,'w') as output_json:\n json.dump(output,output_json, indent=2)\n\n return", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def dump(self):\n course = {\n \"course_id\": self.course_id,\n \"title\": self.title,\n \"chapters\": {}\n }\n for chapter_num in 
self.chapters:\n chapter = self.chapters[chapter_num]\n course[\"chapters\"][chapter.num] = {\n \"name\": chapter.name,\n \"lessons\": {lesson_num: lesson_data.name for lesson_num,\n lesson_data in chapter.lessons.items()}\n }\n with open(_JSON_PATH_FORMAT % self.course_id, \"w+\") as fp:\n _logger.debug(\"Dumping the data into a JSON file so that it can \"\n \"be accessed at a later time quickly and without \"\n \"need to scrape LearnItFirst.com, saving time and \"\n \"unnecessary requests.\")\n json.dump(course, fp)", "def cron_main():\n\t_, club_urls = get_all_clubs()\n\t\n\tclub_info_full =[]\n\tcat_full =[]\n\n\tfor url in club_urls:\n\t\tclub_info, cat_arr = get_club_info(url)\n\t\tclub_info_full.append(club_info)\n\t\tcat_full.extend(cat_arr)\n\n\twith open('clubs.csv', 'w+') as f:\n\t newfile = csv.writer(f, delimiter='|',quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t for club in infoClubs:\n\t \tclubStuff = []\n\t \tfor info in club:\n\t \t\tclubStuff.append(info)\n\t newfile.writerow(clubStuff)\n\n\twith open('clubsCat.csv', 'w+') as fn:\n\t catFile = csv.writer(fn, delimiter=',',quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n\t catFile.writerow(['club_id','club_tag'])\n\t for lol in categoryArr:\n\t catFile.writerow(lol)", "def create_training_set_with_comments(posts_dict, comments_dict, output_filename=direc+\"/training_with_comments.txt\"):\r\n print(\"Creating training set with comments...\")\r\n with open(output_filename, 'w') as f:\r\n total = len(posts_dict)\r\n print(\"# of questions: \" + str(total))\r\n current = 0\r\n for question in posts_dict:\r\n accepted = posts_dict[question]['accepted']\r\n others = posts_dict[question]['other']\r\n line = question\r\n if question in comments_dict:\r\n line += \" \" + \" \".join(comments_dict[question])\r\n \r\n line += \"\\t\" + accepted\r\n if accepted in comments_dict:\r\n line += \" \" + \" \".join(comments_dict[accepted])\r\n \r\n for other in others:\r\n line += \"\\t\" + other\r\n if other in comments_dict:\r\n line += \" \" + \" \".join(comments_dict[other])\r\n line += \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, total)\r\n print(\"\\nFinished creating training set with comments.\\n\")", "def writeFile(self, name, folder, collected_entry_list=[]):\n file_io = open(os.path.join(folder, \"system_%s.json\" % name), \"w\")\n json.dump(collected_entry_list, file_io, sort_keys=True, indent=2)\n file_io.close()", "def mk_json_clms(dlas, outpath, prefix):\n for abssys in dlas._abs_sys:\n tdict = abssys._clmdict\n # Convert AbsLine to dicts\n if 'lines' in tdict.keys():\n new_lines = {}\n for key in tdict['lines']:\n new_lines[key] = tdict['lines'][key].to_dict()\n # Replace\n tdict['lines'] = new_lines\n tdict['Name'] = abssys.name\n tdict['Creation_date'] = str(datetime.date.today().strftime('%Y-%b-%d'))\n # Outfil\n name = survey_name(prefix, abssys)\n outfil = outpath+name+'_clm.json'\n # Write\n print('Writing {:s}'.format(outfil))\n with io.open(outfil, 'w', encoding='utf-8') as f:\n f.write(unicode(json.dumps(tdict, sort_keys=True, indent=4,\n separators=(',', ': '))))", "def _writeComments(self):\n self.header.write(wrapLine(\"NSCOML\", self.annotation, self.delimiter, \"%d\\n\" % self.NSCOML))\n self.header.write(wrapLines(\"SCOM\", self.annotation, self.delimiter, \"%s\\n\" * self.NSCOML % tuple(self.SCOM)))\n self.header.write(wrapLine(\"NNCOML\", self.annotation, self.delimiter, \"%d\\n\" % self.NNCOML))\n self.header.write(wrapLines(\"NCOM\", self.annotation, self.delimiter, \"%s\\n\" * 
self.NNCOML % tuple(self.NCOM)))", "def create_submission_file(\n json_out_file, challenge, submission_url, model_name, model_description, nyu_data_only,\n participants=None, paper_url=None, code_url=None\n):\n\n if challenge not in {'singlecoil', 'multicoil'}:\n raise ValueError(f'Challenge should be singlecoil or multicoil, not {challenge}')\n\n phase_name = f'{challenge}_leaderboard'\n submission_data = dict(\n recon_zip_url=submission_url,\n model_name=model_name,\n model_description=model_description,\n nyudata_only=nyu_data_only,\n participants=participants,\n paper_url=paper_url,\n code_url=code_url\n )\n submission_data = dict(result=[{\n phase_name: submission_data\n }])\n\n with open(json_out_file, 'w') as json_file:\n json.dump(submission_data, json_file, indent=2)", "def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')", "def gen_review_data(fp: str) -> None:\n with open(fp, encoding='utf-8') as f:\n for line in f:\n data = json.loads(line)\n utils.preprocess_raw_json(data)\n doc = {\n \"_index\": \"review\",\n \"_source\": data\n }\n yield doc", "def write():\n output_data = book_list_manipulation()\n\n # Create data directory\n try:\n os.mkdir(DATA_DIR)\n except FileExistsError:\n pass # Ignore - if directory exists, don't need to do anything.\n\n with open(BOOKS_FILE_NAME, 'w') as f:\n json.dump(output_data, f)\n\n with open(COUNTER_FILE_NAME, 'w') as f:\n f.write(str(counter))", "def prepare_conll_data(brat_dirname,\n json_filename,\n dest_dirname):\n brat2json_dir(brat_dirname, json_filename)\n data = read_json(json_filename)\n # split data into training, validation and test\n training_data, validation_data, test_data = split_data(data)\n # output training data into CoNLL format\n output_conll(entity2label_batch(training_data), dest_dirname + 'training.conll')\n # output all data into json file\n write_json(dest_dirname + 'training.json', training_data)\n write_json(dest_dirname + 'validation.json', validation_data)\n write_json(dest_dirname + 'test.json', test_data)" ]
[ "0.7101525", "0.6485747", "0.6191754", "0.6072703", "0.5943167", "0.57518977", "0.56992775", "0.5630235", "0.5627743", "0.5570938", "0.55540836", "0.5501463", "0.5486059", "0.547262", "0.54247934", "0.54052603", "0.5391565", "0.5380575", "0.5356663", "0.5330073", "0.5302979", "0.5282844", "0.52736366", "0.5200469", "0.5191", "0.51762813", "0.5163692", "0.5163303", "0.51587355", "0.5142673" ]
0.9017318
0
Add a new comment to an existing club.
def add_club_comment(user, club, comment):
    with open('club_comments.json') as json_file:
        comments = json.load(json_file)
    if club in comments.keys():
        if comments[club] is None:  # If there are no comments associated with the club Python returns None
            comments[club] = [user + ": " + comment]
        else:
            comments[club].append(user + ": " + comment)
        with open('club_comments.json', 'w') as outfile:
            json.dump(comments, outfile)
        return True
    else:
        return False  # If the specified club name does not exist return False so an error can be specified to the api caller.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_comment(self, comment):\n assert isinstance(comment, Comment)\n self._comments.append(comment)\n return None", "def addComment(self, comment):\r\n comment.topicId = self.topicId\r\n self.comments.append(comment)\r\n return len(self.comments)-1", "def cli(ctx, comment, metadata=\"\"):\n return ctx.gi.cannedcomments.add_comment(comment, metadata=metadata)", "def add_comment(self, project_id, forum_id, comment):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/forums/' + str(forum_id) + '/comments/' \n data = parser.comment_to_json(comment)\n response = zoho_http_client.post(url, self.details, data)\n return parser.get_comments(response)[0]", "def add_comment(self, comment: str):\n self.add_relationship(RDFS.comment, self._graph.string_literal(comment))", "def add_comment(self, comment: str):\n self.add_relationship(\n RDFS.comment, self._graph.string_literal(comment))", "def add_comment(self):\n comment = Comment(\n title=self.title,\n comment=self.comment,\n rating=self.rating,\n user_from_id=g.user.id,\n user_to_id=self.user_to_id\n )\n db.session.add(comment)\n db.session.commit()\n return comment", "def add_comment_to_announcement():\n vars = request.vars\n logger.info(\"vars.comment_text: %r\" % (vars.comment_text))\n comment_id = db.Comments.insert(\n comment_text = vars.comment_text,\n score = 1,\n ann_id= vars.ann_id,\n )\n comment = db.Announcements(comment_id)\n\n logger.info(\"api:add_comment_to_announcement ==> comment= %r\" % (comment))\n\n return response.json(comment)", "def add_comment(self, issue, comment):\n return self.get_jira().add_comment(issue, comment)", "def put_comment(self, object_id, message):\n return self.put_object(object_id, \"comments\", message=message)", "def put_comment(self, object_id, message):\n return self.put_object(object_id, \"comments\", message=message)", "def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()", "def new_comment(comment):\n attribute_state = inspect(comment).attrs.get('comment')\n # Check if the speaker has been updated\n history = attribute_state.history\n # TODO Check for insert rather than assuming the comment is immutable\n if history.has_changes():\n messages.send_new_comment(comment)", "def add_comment() -> str:\n if \"markdown\" in request.form:\n if \"file\" in request.form:\n comment = Comment(\n markdown=request.form[\"markdown\"],\n submission_id=Submission.query.filter(\n Submission.filepath.contains(request.form[\"file\"])\n )\n .first()\n .id,\n cell_id=request.form[\"cell_id\"] if \"cell_id\" in request.form else None,\n user=UserModel.get_by_token(session[\"token\"]),\n )\n # If not cell_id this is a general comment\n comment.save()\n else:\n return \"Missing file or cell_id\", 400\n else:\n return \"Missing markdown\", 400\n\n comment_maker = get_template_attribute(\"_macros.html\", \"comment_block\")\n return comment_maker(comment)", "def add_comment(self, text, displayed, username, time,\n proposal, node_id, parent_id, moderator):\n raise NotImplementedError()", "def comment(self, msg):\n\t\tself._client.add_comment(self, msg)", "def add_comment_to_issue(self, issue, comment, visibility=None):\r\n self.jira.add_comment(issue=issue, body=comment)", "def problem_comments_append(self, identifier, comment, html=None):\n params = {\"text\": comment}\n if html is not None:\n params[\"html\"] = html\n \n self._post(\"problems/%d/comments\" % identifier, json=params)", "def 
__add_comment(self, issue_id, comment):\n import httplib2\n http = httplib2.Http() \n response, content = http.request(\n uri=self.__issue_url % int(issue_id),\n method='PUT',\n body=comment,\n headers={\n 'X-Redmine-API-Key': self.__api_key,\n 'Content-type': 'application/json'\n }\n )\n print(response)\n print(content)", "def add_comment(self, aug_conf_path: str, comment: str) -> None:\n self.aug.set(aug_conf_path + \"/#comment[last() + 1]\", comment)", "def comment(self, message):\n User.graph().put_comment(self.id, message)", "def add_comment(self, checkin_id: str, comment: str) -> Dict:\n method = \"checkin/addcomment/\" + checkin_id\n auth = self._get_access_token()\n if len(comment) > 140:\n raise ValueError(\n f\"Check-in comment is {len(comment)} characters whereas Untappd only supports comments up to 140 characters\"\n )\n params = {\"comment\": comment}\n return self._do_post(method, auth, params)", "def test_add_new_comment(self):\n\n result = self.client.post(\"/add_new_comment/2\",\n data={\"user_id\": 25, \"park_id\": \"2\", \"content\": \"My dog loves this park!\"},\n follow_redirects=True)\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)", "def post_comment(self):\n self.post_question()\n return self.client.post(\"api/v2/1/comments\", headers={\"Authorization\": \"{}\".format(self.token())}, data=json.dumps(self.comment), content_type='application/json')", "def add_comment(self, string):\n if self.comment is None:\n self.comment = string\n else:\n self.comment = self.comment.rstrip() + \"\\n \" + string", "def add_comment(self, message):\n params = {\"ffauth_device_id\": self._DiscretelyAuthenticatedObject__device_id,\n \"ffauth_secret\": self._DiscretelyAuthenticatedObject__device_token}\n data = {\"data\": str(\n {\"event\": {\"type\": \"comment\", \"message\": message, \"assessment_details_id\": self.__assessmentDetailsId},\n \"recipient\": {\"guid\": self._DiscretelyAuthenticatedObject__guid, \"type\": \"user\"}})}\n requests.post(\n self._DiscretelyAuthenticatedObject__portal + \"/_api/1.0/tasks/\" + str(self.id) + \"/responses\",\n params=params, data=data)", "def add_comment(self, text):\n selected = self.GetSelection()\n if selected != wx.NOT_FOUND:\n pseudo = get_facade()._desc.document.get_pseudo()\n self.blog.add_comment(selected, text, pseudo)\n self.refresh()\n else:\n display_warning(_(\"none selected\"))", "def comment(self, text):\n url = \"https://api.imgur.com/3/comment\"\n payload = {'image_id': self.id, 'comment': text}\n resp = self._imgur._send_request(url, params=payload, needs_auth=True,\n method='POST')\n return Comment(resp, imgur=self._imgur, has_fetched=False)", "def _add_comment(self, comment, post_id, page_id, parent_comment=None):\n user_id = self._get_or_create_user(comment['from'])\n message = self._clean_message(comment)\n if len(message) > 0:\n columns = '(user, post, page, fb_id, created_time, message, like_count, comment_count'\n values = (user_id, post_id, page_id, comment['id'], comment['created_time'],\n message, comment['like_count'], comment['comment_count'])\n values_placeholder = '(%s,%s,%s,%s,%s,%s,%s,%s'\n if parent_comment is None:\n columns = columns + ')'\n values_placeholder = values_placeholder + ')'\n else:\n columns = columns + ',parent_comment)'\n values = values + (parent_comment,)\n values_placeholder = values_placeholder + ',%s)'\n return self._insert_if_possible('INSERT INTO comment {} VALUES {}'.format(columns, values_placeholder),\n values)\n else:\n return False", "def add_comment(self, 
author, string, ds_name='ds', data_var=None):\n\n attr = 'comment'\n self.add_string(attr, author, string, ds_name=ds_name, data_var=data_var)" ]
[ "0.7195255", "0.71707064", "0.7056008", "0.7054779", "0.70507956", "0.7042315", "0.6959034", "0.68218344", "0.67307377", "0.66941494", "0.66941494", "0.6638886", "0.6618629", "0.6611672", "0.660841", "0.657663", "0.65676564", "0.65323", "0.65181214", "0.6402509", "0.63962823", "0.6379831", "0.6372909", "0.6345311", "0.63154685", "0.63007176", "0.6264228", "0.6249024", "0.6247178", "0.6223579" ]
0.7891196
0
Hits the designated endpoint (volume/posts) and gets data for a specified timespan. The ratelimit is burned through ASAP and then backed off for one minute.
def get_data_from_endpoint(self, from_, to_, endpoint):
    endpoint = self.make_endpoint(endpoint)
    from_, to_ = str(from_), str(to_)
    payload = {
        'auth': self.auth_token,
        'id': self.monitor_id,
        'start': from_,
        'end': to_,
        'extendLimit': 'true',
        'fullContents': 'true'
    }
    r = self.session.get(endpoint, params=payload)
    ratelimit_remaining = r.headers['X-RateLimit-Remaining']
    #print ('Remaining Ratelimit = ' + str(ratelimit_remaining))
    # If the header is empty or 0 then wait for a ratelimit refresh.
    if (not ratelimit_remaining) or (float(ratelimit_remaining) < 1):
        #print('Waiting for ratelimit refresh...')
        sleep(self.ratelimit_refresh)
    return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rest_rate_limit(r):\n\n try:\n #limit = int(r.headers[\"X-Rate-Limit-Limit\"])\n remain = int(r.headers[\"X-Rate-Limit-Remaining\"])\n reset = int(r.headers[\"X-Rate-Limit-Reset\"])\n curtime = times.to_unix(times.parse(r.headers[\"date\"]))\n except KeyError as e:\n # We dont have the proper headers\n log.error(\"Header not found - {}\", e)\n sleep(RETRY_AFTER)\n return\n\n if remain <= RATE_LIMIT_BUFFER:\n log.debug(\"Hit rate limit - {}\", remain)\n log.debug(\"Rate limit reset in {} seconds\", reset - curtime)\n sleep(reset - curtime + RESET_BUFFER)", "def _get(self,url,params):\n \n while time.time() < self.last_call_time + self.rate_limit_delay:\n if self.verbose:\n print(\"{}: Sleeping\".format(time.ctime()))\n time.sleep(1)\n \n headers={'Authorization':'Bearer ' + self.token} #This is needed to authenticate\n\n if self.verbose:\n print(\"{}: GETTING {}\".format(time.ctime(),url))\n safeHeaders = {k:v for k,v in headers.items() if k!='Authorization'}\n safeHeaders['Authorization']=\"Bearer <shhhhhh it's a secret>\"\n print(\"\\tHeaders: {}\".format(safeHeaders))\n print(\"\\tArgs: {}\".format(params))\n resp = self.sess.get(url, headers=headers, params=params, proxies=self.proxy)\n \n self.last_call_time = time.time()\n return resp", "def _do_request(self, endpoint, params=None):\n\n resp = requests.get(\n urljoin(API_BASE_URL, endpoint), headers=self.headers, params=params\n )\n print(urljoin(API_BASE_URL, endpoint))\n if resp.status_code == 404:\n return []\n if resp.status_code == 429:\n period_remaining = int(\n re.match(r\"\\D*(\\d+)\\D*\", resp.json()[\"message\"]).group(1)\n )\n raise RateLimitException(\n message=resp.json()[\"message\"], period_remaining=period_remaining\n )\n resp.raise_for_status()\n return resp.json()", "def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], 
high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()", "def range():\n\n # Time this functions.\n timer = coils.Timer()\n\n # Parse the URL parameter \"amount\".\n errors = list()\n try:\n amount = flask.request.args.get('amount')\n amount = float(amount)\n except:\n errors.append('Failed to parse \"amount\" parameter.')\n\n # Bail on any errors.\n if errors:\n return flask.jsonify(errors=errors)\n\n\n latest_tstring = db.session.query(mapping.Datum).\\\n filter(mapping.Datum.name=='latest_tstamp')[0].value\n latest_time = coils.string2time(latest_tstring)\n start_time = latest_time - dt.timedelta(seconds=amount)\n start_tstring = getNearestTime(start_time)\n \n return flask.jsonify(\n begin_time=start_tstring,\n end_time=latest_tstring,\n )", "def _get_consumption(self, url, start, end, aggregation):\n start = self._to_milliseconds(start)\n end = self._to_milliseconds(end)\n\n headers = {\"Authorization\": \"Bearer {}\".format(self.access_token)}\n params = {\n \"aggregation\": aggregation,\n \"from\": start,\n \"to\": end\n }\n r = requests.get(url, headers=headers, params=params)\n r.raise_for_status()\n return r.json()", "def fetchraw(self, pv, callback,\n cbArgs=(), cbKWs={},\n T0=None, Tend=None,\n count=None, chunkSize=None,\n archs=None, breakDown=None,\n enumAsInt=False, displayMeta=False, rawTimes=False):\n if breakDown is None:\n breakDown = yield self.search(exact=pv, archs=archs,\n breakDown=True, rawTime=True)\n\n breakDown = breakDown[pv]\n\n if len(breakDown)==0:\n _log.error(\"PV not archived\")\n defer.returnValue(0)\n\n if rawTimes:\n Tcur, Tend = T0, Tend\n else:\n Tcur, Tend = timeTuple(T0), timeTuple(Tend)\n\n _log.debug(\"Time range: %s -> %s\", Tcur, Tend)\n _log.debug(\"Planning with: %s\", map(lambda a,b,c:(a,b,self.__rarchs[c]), breakDown))\n\n plan = []\n \n # Plan queries\n # Find a set of non-overlapping regions\n for F, L, K in 
breakDown:\n # some mis-match of definitions\n # the search results give the times\n # of the first and last samples\n # inclusive.\n # time range [F, L]\n # However, values() query end time\n # is exclusive\n # time range [F, L)\n # We step the end time forward by 1 micro-second\n # to ensure that the last sample can be returned.\n # Note: it seems that Channel Archiver uses\n # micro-sec resolution times for comparisons...\n _log.debug(\"Before: %s\", L)\n LS, LN = L\n LN += 1000\n if LN>1000000000:\n LS += 1\n LN = 0\n L = LS, LN\n _log.debug(\"After: %s\", L)\n\n if L <= Tcur:\n continue # Too early, keep going\n elif F >= Tend:\n break # No more data in range\n\n # range to request from this archive\n Rstart = max(Tcur, F)\n Rend = min(Tend, L)\n\n plan.append((Rstart, Rend, K))\n \n Tcur = Rend\n\n if len(plan)==0 and len(breakDown)>0 and breakDown[-1][1] <= Tcur:\n # requested range is later than last recorded sample,\n # which is all we can return\n F, L, K = breakDown[-1]\n LS, LN = L\n plan.append(((LS+1,0),(LS+2,0),K))\n count=1\n _log.debug(\"Returning last sample. No data in or after requested time range.\")\n elif len(plan)==0:\n # requested range is earlier than first recorded sample.\n _log.warn(\"Query plan empty. No data in or before request time range.\")\n defer.returnValue(0)\n\n _log.debug(\"Using plan of %d queries %s\", len(plan), map(lambda a,b,c:(a,b,self.__rarchs[c]), plan))\n\n N = yield self._nextraw(0, pv=pv, plan=plan,\n Ctot=0, Climit=count,\n callback=callback, cbArgs=cbArgs,\n cbKWs=cbKWs, chunkSize=chunkSize,\n enumAsInt=enumAsInt, displayMeta=displayMeta)\n\n defer.returnValue(N)", "def _post(self,url,params):\n\n while time.time() < self.last_call_time + self.rate_limit_delay:\n if self.verbose:\n print(\"{}: Sleeping\".format(time.ctime()))\n time.sleep(1)\n \n headers={'Authorization':'Bearer ' + self.token} #This is needed to authenticate\n\n if self.verbose:\n print(\"{}: POSTING {}\".format(time.ctime(),url))\n safeHeaders = {k:v for k,v in headers.items() if k!='Authorization'}\n safeHeaders['Authorization']=\"Bearer <shhhhhh it's a secret>\"\n print(\"\\tHeaders: {}\".format(safeHeaders))\n print(\"\\tArgs: {}\".format(params))\n resp = self.sess.post(url, headers=headers, json=params, proxies=self.proxy) \n \n self.last_call_time = time.time()\n \n return resp.json()", "def on_get(self, req, resp):\n task = get_median_for_last_min.delay(time.time())\n\n result_url = os.path.join(\n os.environ['MEDIAN_API_URL'], 'result', task.id)\n resp.body = json.dumps({'result_url': result_url})\n resp.status = falcon.HTTP_200", "def listget(base_url, keys, throttle, generic_rate, max_lookback, tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send 
will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine if we're going to get the notification by itself (which is technically unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. 
This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))", "def http_call(self, request):\n response = self.session.get(request)\n attempts = 0\n while response.status_code == 429:\n if attempts > 5:\n break\n attempts = attempts + 1\n time.sleep(30)\n response = self.session.get(request)\n response.raise_for_status()\n return response", "def send_data():\n range = request.args.get('range', '30')\n time = arrow.utcnow().replace(minutes=-int(range))\n data = Temperature.query\\\n .filter(Temperature.timestamp > time).order_by(Temperature.timestamp.desc()).all()\n return jsonify(results=[i.serialize for i in data])", "def bus_layer(start,end, results, case):\n def overpass_request(data, pause_duration=None, timeout=180, error_pause_duration=None):\n \"\"\"\n Send a request to the Overpass API via HTTP POST and return the JSON\n response.\n Parameters\n ----------\n data : dict or OrderedDict\n key-value pairs of parameters to post to the API\n pause_duration : int\n how long to pause in seconds before requests, if None, will query API\n status endpoint to find when next slot is available\n timeout : int\n the timeout interval for the requests library\n error_pause_duration : int\n how long to pause in seconds before re-trying requests if error\n Returns\n -------\n dict\n \"\"\"\n\n # define the Overpass API URL, then construct a GET-style URL as a string to\n # hash to look up/save to cache\n url = settings.overpass_endpoint.rstrip('/') + '/interpreter'\n prepared_url = requests.Request('GET', url, params=data).prepare().url\n cached_response_json = get_from_cache(prepared_url)\n\n if cached_response_json is not None:\n # found this request in the cache, just return it instead of making a\n # new HTTP call\n return cached_response_json\n\n else:\n # if this URL is not already in the cache, pause, then request it\n if pause_duration is None:\n this_pause_duration = get_pause_duration()\n log('Pausing {:,.2f} seconds before making API POST request'.format(this_pause_duration))\n time.sleep(this_pause_duration)\n start_time = time.time()\n 
log('Posting to {} with timeout={}, \"{}\"'.format(url, timeout, data))\n response = requests.post(url, data=data, timeout=timeout, headers=get_http_headers())\n\n # get the response size and the domain, log result\n size_kb = len(response.content) / 1000.\n domain = re.findall(r'(?s)//(.*?)/', url)[0]\n log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'.format(size_kb, domain, time.time() - start_time))\n\n try:\n response_json = response.json()\n if 'remark' in response_json:\n log('Server remark: \"{}\"'.format(response_json['remark'], level=lg.WARNING))\n save_to_cache(prepared_url, response_json)\n except Exception:\n # 429 is 'too many requests' and 504 is 'gateway timeout' from server\n # overload - handle these errors by recursively calling\n # overpass_request until we get a valid response\n if response.status_code in [429, 504]:\n # pause for error_pause_duration seconds before re-trying request\n if error_pause_duration is None:\n error_pause_duration = get_pause_duration()\n log(\n 'Server at {} returned status code {} and no JSON data. Re-trying request in {:.2f} seconds.'.format(\n domain,\n response.status_code,\n error_pause_duration),\n level=lg.WARNING)\n time.sleep(error_pause_duration)\n response_json = overpass_request(data=data, pause_duration=pause_duration, timeout=timeout)\n\n # else, this was an unhandled status_code, throw an exception\n else:\n log('Server at {} returned status code {} and no JSON data'.format(domain, response.status_code),\n level=lg.ERROR)\n raise Exception(\n 'Server returned no JSON data.\\n{} {}\\n{}'.format(response, response.reason, response.text))\n\n return response_json\n def get_node(element):\n \"\"\"\n Convert an OSM node element into the format for a networkx node.\n\n Parameters\n ----------\n element : dict\n an OSM node element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_node = ['ref', 'highway', 'route_ref', 'asset_ref']\n\n node = {}\n node['y'] = element['lat']\n node['x'] = element['lon']\n node['osmid'] = element['id']\n\n\n if 'tags' in element:\n for useful_tag in useful_tags_node:\n if useful_tag in element['tags']:\n node[useful_tag] = element['tags'][useful_tag]\n return node\n def get_path(element,element_r):\n \"\"\"\n Convert an OSM way element into the format for a networkx graph path.\n\n Parameters\n ----------\n element : dict\n an OSM way element\n element_r : dict\n an OSM way element\n\n Returns\n -------\n dict\n \"\"\"\n useful_tags_path_e = ['bridge', 'tunnel', 'oneway', 'lanes', 'name',\n 'highway', 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction']\n\n useful_tags_path_r = ['bridge', 'tunnel', 'oneway', 'lanes', 'ref', 'direction', 'from', 'to', 'name',\n 'highway', 'maxspeed', 'service', 'access', 'area',\n 'landuse', 'width', 'est_width', 'junction']\n\n\n\n path = {}\n path['osmid'] = element['id']\n\n # remove any consecutive duplicate elements in the list of nodes\n grouped_list = groupby(element['nodes'])\n path['nodes'] = [group[0] for group in grouped_list]\n\n\n\n if 'tags' in element:\n # for relation in element_r['elements']:\n # if relation['type'] == 'relation':\n # for members in relation['members']:\n # if members['ref'] == element['id']:\n for useful_tag in useful_tags_path_e:\n if useful_tag in element['tags']:\n path[useful_tag] = element['tags'][useful_tag]\n # for useful_tag in useful_tags_path_r:\n # if useful_tag in relation['tags']:\n # try:\n # path[useful_tag] = path[useful_tag] + \";\" + relation['tags'][useful_tag]\n # except 
KeyError:\n # path[useful_tag] = relation['tags'][useful_tag]\n # pass\n\n return path\n def parse_osm_nodes_paths(osm_data):\n \"\"\"\n Construct dicts of nodes and paths with key=osmid and value=dict of\n attributes.\n\n Parameters\n ----------\n osm_data : dict\n JSON response from from the Overpass API\n\n Returns\n -------\n nodes, paths : tuple\n \"\"\"\n\n nodes = {}\n paths = {}\n relation = {}\n\n # for element in osm_data['elements']:\n # if element['type'] == 'relation':\n\n\n for element in osm_data['elements']:\n if element['type'] == 'node':\n key = element['id']\n nodes[key] = get_node(element)\n\n elif element['type'] == 'way': #osm calls network paths 'ways'\n key = element['id']\n # pp.pprint(element)\n paths[key] = get_path(element,osm_data)\n\n return nodes, paths\n def create_graph(response_jsons, name='unnamed', retain_all=True, bidirectional=False):\n \"\"\"\n Create a networkx graph from Overpass API HTTP response objects.\n\n Parameters\n ----------\n response_jsons : list\n list of dicts of JSON responses from from the Overpass API\n name : string\n the name of the graph\n retain_all : bool\n if True, return the entire graph even if it is not connected\n bidirectional : bool\n if True, create bidirectional edges for one-way streets\n\n Returns\n -------\n networkx multidigraph\n \"\"\"\n\n log('Creating networkx graph from downloaded OSM data...')\n start_time = time.time()\n\n # make sure we got data back from the server requests\n elements = []\n # for response_json in response_jsons:\n elements.extend(response_json['elements'])\n if len(elements) < 1:\n raise EmptyOverpassResponse('There are no data elements in the response JSON objects')\n\n # create the graph as a MultiDiGraph and set the original CRS to default_crs\n G = nx.MultiDiGraph(name=name, crs=settings.default_crs)\n\n # extract nodes and paths from the downloaded osm data\n nodes = {}\n paths = {}\n # for osm_data in response_jsons:\n nodes_temp, paths_temp = parse_osm_nodes_paths(response_jsons)\n for key, value in nodes_temp.items():\n nodes[key] = value\n for key, value in paths_temp.items():\n paths[key] = value\n\n # add each osm node to the graph\n for node, data in nodes.items():\n G.add_node(node, **data)\n\n # add each osm way (aka, path) to the graph\n G = ox.add_paths(G, paths, bidirectional=bidirectional)\n\n # retain only the largest connected component, if caller did not\n # set retain_all=True\n if not retain_all:\n G = get_largest_component(G)\n\n log('Created graph with {:,} nodes and {:,} edges in {:,.2f} seconds'.format(len(list(G.nodes())), len(list(G.edges())), time.time()-start_time))\n\n # add length (great circle distance between nodes) attribute to each edge to\n # use as weight\n if len(G.edges) > 0:\n G = ox.add_edge_lengths(G)\n\n return G\n def calculate_H(s_lat,s_lon,e_lat,e_lon):\n \"\"\"\n Calculate a distance with x,y coordinates with\n\n Parameters\n ----------\n s_lat : float (starting lat)\n s_lon : float (starting lon)\n e_lat : float (ending lat)\n e_lon : float (ending lon)\n\n Returns\n -------\n distance\n \"\"\"\n R = 6371.0\n snlat = radians(s_lat)\n snlon = radians(s_lon)\n elat = radians(e_lat)\n elon = radians(e_lon)\n actual_dist = 6371.01 * acos(sin(snlat) * sin(elat) + cos(snlat) * cos(elat) * cos(snlon - elon))\n actual_dist = actual_dist * 1000\n return actual_dist\n def bus_details_SD(adjacent_list):\n \"\"\"\n store all details from LTA data mall into dictionary\n\n Parameters\n ----------\n adjacent_list : dict\n\n Returns\n -------\n 
adjacent_list : dict\n \"\"\"\n\n temp = 0\n for x in results:\n if temp != x.get('ServiceNo'):\n temp = x.get('ServiceNo')\n count = 0\n adja_bus_stop = my_dictionary()\n adjacent_list.add(temp, adja_bus_stop)\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n else:\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n return adjacent_list\n def get_nearestedge_node(osm_id, a, G):\n \"\"\"\n Find the nearest node available in Open street map\n\n Parameters\n ----------\n osm_id : node ID\n a : plotting graph\n g : bus graph\n\n Returns\n -------\n temp_nearest_edge[1]/temp_nearest_edge[2] : nearest node to a way ID\n \"\"\"\n temp_y = G.nodes.get(osm_id).get('y')\n temp_x = G.nodes.get(osm_id).get('x')\n temp_nearest_edge = ox.get_nearest_edge(a, (temp_y, temp_x))\n temp_1 = temp_nearest_edge[0].coords[0]\n temp_2 = temp_nearest_edge[0].coords[1]\n temp1_x = temp_1[0]\n temp1_y = temp_1[1]\n temp_1_distance = calculate_H(temp1_y,temp1_x,temp_y,temp_x)\n\n temp2_x = temp_2[0]\n temp2_y = temp_2[1]\n temp_2_distance = calculate_H(temp2_y,temp2_x,temp_y,temp_x)\n if temp_1_distance < temp_2_distance:\n return temp_nearest_edge[1]\n else:\n return temp_nearest_edge[2]\n def delete_duplicate(x):\n \"\"\"\n Delete duplicate within a list\n\n Parameters\n ----------\n x : list\n\n Returns\n -------\n list\n \"\"\"\n return list(dict.fromkeys(x))\n def request_busG():\n \"\"\"\n Find all nodes that is a bus stop\n\n Returns\n -------\n busG : dict\n \"\"\"\n busG = {}\n for x in G.nodes.items():\n if x[1].get('highway') == 'bus_stop':\n xy = []\n xy.append(x[1].get('osmid'))\n xy.append(x[1].get('x'))\n xy.append(x[1].get('y'))\n busG[x[1].get('osmid')] = xy\n\n return busG\n\n # ---MAIN---#\n\n query_str = '[out:json][timeout:180];node[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);way[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);(relation[\"type\"=\"route\"](1.385700,103.887300,1.422000,103.925900);>;);out;'\n response_json = overpass_request(data={'data': query_str}, timeout=180)\n pp = pprint.PrettyPrinter(indent=4)\n # start = 1847853709\n # end = 410472575\n # end = 3737148763\n # bus transit\n # start = 2110621974\n # end = 2085845884\n\n adjacent_list = my_dictionary()\n\n G = ox.load_graphml('Bus_Overpass.graphml')\n\n if case == 1:\n return request_busG()\n n, e = ox.graph_to_gdfs(G)\n # e.to_csv(\"Edge_test_busstop.csv\")\n if len(results) == 0:\n\n results = bus_details_all(results) # Details from LTA Datamall, extracting all details such as service no, bus stop number\n\n adjacent_list = bus_details_SD(adjacent_list) # From results, it extracts bus stop number and distance\n start_busstop = (G.nodes.get(start)).get('asset_ref')\n end_busstop = (G.nodes.get(end)).get('asset_ref')\n\n #Start finding common bus service within the start bus stop and end bus stop\n try:\n if \";\" in (G.nodes.get(start).get('route_ref')):\n start_rr = (G.nodes.get(start).get('route_ref')).split(\";\")\n else:\n start_rr = []\n start_rr.append((G.nodes.get(start).get('route_ref')))\n print(\"TEST - G.nodes.get(end): \", G.nodes.get(end))\n if \";\" in (G.nodes.get(end).get('route_ref')):\n end_rr = (G.nodes.get(end).get('route_ref')).split(\";\")\n else:\n end_rr = []\n end_rr.append((G.nodes.get(end).get('route_ref')))\n common = list(set(start_rr) & set(end_rr))\n except:\n return -1\n\n \"\"\"\n This method strictly emphasis on greedy algorithm. 
Thus it will prioritze the numbers of transit rather than distance\n Check if any common bus service within start and end bus stop.\n If found, route_list will capture the entire route of the common bus service \n No transit will occuer as it is a straight path, start busstop -> end busstop\n If not found, the program will proceed to find a common bus stop within the start and end bus services. \n Thus a transit will occur, start busstop -> mid busstop -> end busstop\n \"\"\"\n route_list = {}\n mid_route_list = {}\n # print(\"TEST - Start: \", start_busstop)\n # print(\"TEST - End: \", end_busstop)\n # print(\"TEST - start_rr: \", start_rr)\n # print(\"TEST - end_rr: \", end_rr)\n # print(\"TEST - Common: \", common)\n common_mid = []\n if len(common) == 0: #No common bus service found\n while(len(common_mid) == 0): #Start finding a common mid busstop\n rona_one = []\n rona_two = []\n for start_to_mid in start_rr: #Capture all common mid busstop\n print(\"TEST - start_to_mid: \", start_to_mid)\n for bus_sequence in adjacent_list.get(start_to_mid):\n rona_one.append(str(adjacent_list.get(start_to_mid).get(bus_sequence)[0]))\n for mid_to_end in end_rr:\n print(\"TEST - mid_to_end: \", mid_to_end)\n for bus_sequence in adjacent_list.get(mid_to_end):\n rona_two.append(str(adjacent_list.get(mid_to_end).get(bus_sequence)[0]))\n found_br = []\n print(\"TEST rona 1:\", rona_one)\n print (\"TEST rona 2:\", rona_two)\n found_br.append(start_to_mid+\";\"+mid_to_end)\n found_br.extend(list(set(rona_one)&set(rona_two)))\n common_mid.append(found_br)\n\n print(\"TEST - common_mid: \",common_mid)\n\n bus_service = start_to_mid\n temp_bus = []\n mid_busstop = 0\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Finding bus service for start busstop -> mid busstop\n for x in range (0, len(common_mid)):\n for i in common_mid[x]:\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop):\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(i) and approved == 1:\n mid_busstop = str(i)\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if mid_busstop != 0:\n break\n if str(start_busstop) not in temp_bus or str(mid_busstop) not in temp_bus: #If not found, continue to next loop\n continue\n temp_bus = delete_duplicate(temp_bus)\n mid_route_list[bus_service] = temp_bus\n\n for x in G.nodes: #After finding bus service to mid busstop, start finding path mid busstop to end busstop\n if G.nodes.get(x).get('asset_ref') == mid_busstop:\n if \";\" in (G.nodes.get(x).get('route_ref')):\n start_rr = (G.nodes.get(x).get('route_ref')).split(\";\")\n else:\n start_rr = []\n start_rr.append((G.nodes.get(start).get('route_ref')))\n\n common = list(set(start_rr) & set(end_rr))\n start_busstop = mid_busstop\n if start == 1847853709: #If bus service started from punggol interchange\n for bus_service in common:\n temp_bus = []\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Capture bus route\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop) and adjacent_list.get(bus_service).get(bus_sequence)[1] == 0:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(end_busstop) and approved == 1:\n 
temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if str(start_busstop) not in temp_bus or str(end_busstop) not in temp_bus:\n continue\n route_list[bus_service] = temp_bus\n else:\n for bus_service in common: #If bus service does not start from punggol interchange\n temp_bus = []\n approved = 0\n for bus_sequence in adjacent_list.get(bus_service): #Capture bus route\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(start_busstop):\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 1\n if str(adjacent_list.get(bus_service).get(bus_sequence)[0]) == str(end_busstop) and approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n approved = 0\n break\n if approved == 1:\n temp_bus.append(adjacent_list.get(bus_service).get(bus_sequence)[0])\n if str(start_busstop) not in temp_bus or str(end_busstop) not in temp_bus:\n continue\n route_list[bus_service] = temp_bus\n\n \"\"\"\n After capturing all the bus serivce. A comparison is made in favor for the number of bus stops\n It will choose the least amount of bus stops and store in post_compare\n \"\"\"\n compare = [0, 100]\n if len(route_list.keys()) > 1:\n for i in route_list:\n if len(route_list.get(i)) < compare[1]:\n compare[0] = i\n compare[1] = len(route_list.get(i))\n else:\n for i in route_list:\n compare[0] = i\n compare[1] = len(route_list.get(i))\n post_compare = []\n print(\"TEST - Mid route list: \", mid_route_list)\n if len(mid_route_list) != 0:\n for i in mid_route_list:\n post_compare.append(i)\n route_list[i] = mid_route_list.get(i)\n post_compare.append(compare[0])\n else:\n post_compare.append(compare[0])\n\n\n\n \"\"\"\n Upon comparison, it will start capturing the nodes within the bus path and store in plot_list\n \"\"\"\n plot_list = []\n try:\n print(\"TEST - post_Compare: \", post_compare)\n print(\"TEST - Route list: \", route_list)\n for count in range (0, len(post_compare)):\n for x in route_list.get(str(post_compare[count])):\n for i in G.nodes:\n if str(G.nodes.get(i).get('asset_ref')) == str(x):\n plot_list.append(G.nodes.get(i).get('osmid'))\n break\n except:\n return -1\n edge_list = []\n punggol = (1.403948, 103.909048)\n \"\"\"\n It will generate out the list of node ID for the UI to plot\n \"\"\"\n a = ox.load_graphml('Bus_graph.graphml')\n for x in plot_list:\n edge_list.append(get_nearestedge_node(x,a,G))\n\n print(\"TEST - Plot list: \", plot_list)\n print(\"TEST - Edge list: \", edge_list)\n final_route_list = []\n count_stops = len(plot_list)\n for x in range (0, len(edge_list)-1):\n final_route_list.append(nx.shortest_path(a, edge_list[x], edge_list[x+1]))\n print(final_route_list)\n return final_route_list", "def request_device_readings_quartiles(device_uuid):\n\n # Set the db that we want and open the connection\n start = request.args.get('start')\n end = request.args.get('end')\n if app.config['TESTING']:\n conn = sqlite3.connect('test_database.db')\n else:\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n cur = conn.cursor()\n #check for start\n if start != None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and 
date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, end, \n device_uuid, \n start, end, \n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end,\n device_uuid, \n device_uuid, \n start, end,\n device_uuid, \n start, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start != None and end == None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created>\"{}\"\n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n start, \n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n device_uuid, \n device_uuid, \n start,\n device_uuid, \n start,\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end != None:\n # Execute the query\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n )\n ) as T2\n 
,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\" and date_created<\"{}\" \n )\n ) as T3\n '''.format(\n device_uuid, \n device_uuid, \n end, \n device_uuid, \n end, \n device_uuid, \n device_uuid, \n end,\n device_uuid, \n end,\n device_uuid, \n device_uuid, \n end,\n device_uuid, end\n ))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200\n if start == None and end == None:\n cur.execute('''\n select * from\n (\n SELECT AVG(value) FROM readings where value < (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T1\n ,\n (\n SELECT AVG(value) FROM readings where value > (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n )\n ) as T2\n ,\n (\n SELECT value FROM readings where device_uuid=\"{}\" ORDER BY value LIMIT 2 - (\n SELECT COUNT(*) FROM readings where device_uuid=\"{}\"\n ) % 2 OFFSET (\n SELECT (COUNT(*) - 1) / 2 FROM readings where device_uuid=\"{}\"\n )\n ) as T3\n '''.format(device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid, device_uuid))\n rows = cur.fetchall()\n eljson = jsonify([dict(zip(['quartile_1', 'quartile_3', 'median'], row)) for row in rows])\n # Return the JSON\n return eljson, 200", "def load():\n\n time.sleep(0.2) # Used to simulate delay\n\n if request.args:\n counter = int(request.args.get(\"c\")) # The 'counter' value sent in the QS\n\n if counter == 0:\n print(f\"Returning posts 0 to {quantity}\")\n # Slice 0 -> quantity from the db\n res = make_response(jsonify(db[0: quantity]), 200)\n\n elif counter == posts:\n print(\"No more posts\")\n res = make_response(jsonify({}), 200)\n\n else:\n print(f\"Returning posts {counter} to {counter + quantity}\")\n # Slice counter -> quantity from the db\n res = make_response(jsonify(db[counter: counter + quantity]), 200)\n\n return res", "async def end_point(subscription_data: dict):\n logger.debug([\"Raw data: \", subscription_data])\n\n data = subscription_data.get(\"data\")\n if not data:\n logger.error(\"No subscription data.\")\n raise HTTPException(status_code=500, detail=\"No subscription data.\")\n\n redis = app.state.redis\n if not await redis.get_key(\"influxdb_buckets\"):\n logger.error(\"Buckets config undefined.\")\n raise HTTPException(\n status_code=500, detail=\"Buckets config undefined.\")\n if not await redis.get_key(\"influxdb_measurements\"):\n logger.error(\"Measurements config undefined.\")\n raise HTTPException(\n status_code=500, detail=\"Measurements config undefined.\")\n if not await redis.get_key(\"influxdb_organizations\"):\n logger.error(\"Organizations config undefined.\")\n raise HTTPException(\n status_code=500, detail=\"Organizations config undefined.\")\n # records = []\n records = {}\n found_org = False\n\n measurements_obj = orjson.loads(await redis.get_key(\"influxdb_measurements\"))\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n 
buckets_obj = orjson.loads(await redis.get_key(\"influxdb_buckets\"))\n\n for entity in data:\n measurement_data = measurements_obj.get(\n entity[\"metadata\"][\"measurement\"])\n if measurement_data is None:\n raise HTTPException(\n status_code=404, detail=\"Measurement not found\")\n measurement = entity[\"metadata\"][\"measurement\"]\n logger.debug([\"Measurement: \", measurement])\n logger.debug(\n [\"Entity measurement: \", entity[\"metadata\"][\"measurement\"]])\n tags = {}\n fields = {}\n for tag in measurement_data[\"influx_data\"][\"tags\"]:\n logger.debug([\"tag: \", tag])\n for var1 in entity:\n logger.debug([\"entity var:\", var1])\n if measurement_data[\"influx_data\"][\"tags\"][tag] == var1:\n tags[tag] = entity[var1]\n for field in measurement_data[\"influx_data\"][\"fields\"]:\n logger.debug([\"field: \", field])\n for var2 in entity:\n logger.debug([\"entity var:\", var2])\n if measurement_data[\"influx_data\"][\"fields\"][field] == var2:\n fields[field] = entity[var2]\n\n if not tags:\n logger.warning(\"No tags found\")\n if not fields:\n logger.error([\"No fields matched\", subscription_data])\n raise HTTPException(status_code=406, detail=\"No fields matched\")\n\n insert_object = {\n \"measurement\": measurement,\n \"tags\": tags,\n \"fields\": fields\n }\n logger.info([\"Insert object: \", insert_object])\n\n for org in organizations_obj:\n for bucket in organizations_obj.get(org, {}).get(\"buckets\", []):\n for measurement_temp in buckets_obj.get(bucket, {}).get(\"measurements\", {}):\n if measurement == measurement_temp:\n found_org = True\n logger.debug([\"Organization: \", org])\n logger.debug([\"Bucket: \", bucket])\n logger.debug([\"Measurement temp: \", measurement_temp])\n logger.debug([\"Measurement: \", measurement])\n insert_object[\"time\"] = iso_time()\n # records.append(insert_object)\n if not records.get(org):\n records[org] = {}\n if not records.get(org).get(bucket):\n records[org][bucket] = []\n records[org][bucket].append(insert_object)\n\n logger.debug([\"Records:\", records])\n\n if not found_org:\n logger.error(\n 'No organizations or buckets matched for %measurement measurement.')\n raise HTTPException(\n status_code=406,\n detail=\"No organizations or buckets matched for {} measurement.\".format(\n measurement)\n )\n\n logger.info(\"Trying to push data to InfluxDB...\")\n influxdb = app.state.influxdb\n\n for org in records:\n for bucket in records[org]:\n influxdb.write_data(org=org, bucket=bucket,\n records=records[org][bucket])\n\n logger.info(\"Successfully inserted data into InfluxDB.\")\n return HTMLResponse(content=\"Successfully inserted data into InfluxDB.\", status_code=200)", "def _invoke_pagerduty_resource_api(uri, headers, json_root, params={}, timeout_seconds=None):\n uri_parts = list(urlparse.urlparse(uri))\n uri_parts[4] = urllib.urlencode(params, True)\n query_uri = urlparse.urlunparse(uri_parts)\n\n req = urllib2.Request(query_uri)\n for header,value in headers.iteritems():\n req.add_header(header, value)\n _add_default_headers(req)\n\n try:\n f = urllib2.urlopen(req, None, timeout_seconds)\n except urllib2.URLError as e:\n if hasattr(e, 'code'):\n if e.code == 401: # Unauthorized\n raise InvalidTokenException()\n else:\n msg = 'The PagerDuty server couldn\\'t fulfill the request: HTTP %d (%s)' % (e.code, e.msg)\n raise PagerDutyUnreachableException(msg)\n elif hasattr(e, 'reason'):\n msg = 'Failed to contact the PagerDuty server: %s' % (e.reason)\n raise PagerDutyUnreachableException(msg)\n else:\n raise 
PagerDutyUnreachableException()\n\n response_data = f.read()\n f.close()\n\n try:\n response = json.loads(response_data)\n except ValueError as e:\n raise ParseException(e.message)\n\n if type(response) is not DictType:\n raise ParseException('Dictionary not returned')\n\n if json_root not in response:\n raise ParseException(\"Missing '%s' key in API response\" % json_root)\n\n resource = response[json_root]\n\n if type(resource) is not ListType:\n raise ParseException(\"'%s' is not a list\" % json_root)\n\n limit = response.get('limit')\n offset = response.get('offset')\n more = response.get('more')\n\n if more:\n newOffset = offset + limit\n params.update({'limit': limit, 'offset': newOffset})\n return resource + _invoke_pagerduty_resource_api(uri, headers, json_root, params, timeout_seconds)\n else:\n return resource", "def _api_query_paginated(\n self,\n options: dict[str, Any],\n case: Literal['trades', 'asset_movements'],\n ) -> Union[list[Trade], list[AssetMovement], list]:\n endpoint: Literal['trades', 'movements']\n case_: Literal['trades', 'asset_movements']\n if case == 'trades':\n endpoint = 'trades'\n case_ = 'trades'\n elif case == 'asset_movements':\n endpoint = 'movements'\n case_ = 'asset_movements'\n else:\n raise AssertionError(f'Unexpected {self.name} case: {case}')\n\n call_options = options.copy()\n limit = options['limit']\n results: Union[list[Trade], list[AssetMovement], list] = []\n processed_result_ids: set[int] = set()\n retries_left = API_REQUEST_RETRY_TIMES\n while retries_left >= 0:\n response = self._api_query(\n endpoint=endpoint,\n options=call_options,\n )\n if response.status_code != HTTPStatus.OK:\n try:\n error_response = json.loads(response.text)\n except JSONDecodeError:\n msg = f'{self.name} {case} returned an invalid JSON response: {response.text}.'\n log.error(msg, options=call_options)\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n # Check if the rate limits have been hit (response JSON as dict)\n if isinstance(error_response, dict):\n if error_response.get('error', None) == API_RATE_LIMITS_ERROR_MESSAGE:\n if retries_left == 0:\n msg = (\n f'{self.name} {case} request failed after retrying '\n f'{API_REQUEST_RETRY_TIMES} times.'\n )\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n # Trigger retry\n log.debug(\n f'{self.name} {case} request reached the rate limits. 
Backing off',\n seconds=API_REQUEST_RETRY_AFTER_SECONDS,\n options=call_options,\n )\n retries_left -= 1\n gevent.sleep(API_REQUEST_RETRY_AFTER_SECONDS)\n continue\n\n # Unexpected JSON dict case, better to log it\n msg = f'Unexpected {self.name} {case} unsuccessful response JSON'\n log.error(msg, error_response=error_response)\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n return self._process_unsuccessful_response(\n response=response,\n case=case_,\n )\n\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError:\n msg = f'{self.name} {case} returned invalid JSON response: {response.text}.'\n log.error(msg)\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n results_ = self._deserialize_api_query_paginated_results(\n case=case_,\n options=call_options,\n raw_results=response_list,\n processed_result_ids=processed_result_ids,\n )\n results.extend(cast(Iterable, results_))\n # NB: Copying the set before updating it prevents losing the call args values\n processed_result_ids = processed_result_ids.copy()\n # type ignore is due to always having a trade link for bitfinex trades\n processed_result_ids.update({int(result.link) for result in results_}) # type: ignore\n\n if len(response_list) < limit:\n break\n # Update pagination params per endpoint\n # NB: Copying the dict before updating it prevents losing the call args values\n call_options = call_options.copy()\n call_options.update({\n 'start': results[-1].timestamp * 1000,\n })\n\n return results", "def test_radar_request_site_historic_px250_bufr_timerange(default_settings):\n\n timestamp = dt.datetime.utcnow() - dt.timedelta(days=1)\n\n request = DwdRadarValues(\n parameter=DwdRadarParameter.PX250_REFLECTIVITY,\n start_date=timestamp,\n end_date=dt.timedelta(hours=1),\n site=DwdRadarSite.BOO,\n settings=default_settings,\n )\n\n # Verify number of elements.\n results = list(request.query())\n\n if len(results) == 0:\n raise pytest.skip(\"Data currently not available\")\n\n assert len(results) == 12", "def pull_data(stop_event):\r\n logger = logging.getLogger(__name__)\r\n\r\n # List of current formats supported\r\n currency_list = ['https://www.bitstamp.net/api/v2/ticker_hour/btceur', 'https://www.bitstamp.net/api/v2/ticker_hour/btcusd',\r\n 'https://www.bitstamp.net/api/v2/ticker_hour/ethusd', 'https://www.bitstamp.net/api/v2/ticker_hour/etheur']\r\n\r\n # Loop until told otherwise!\r\n while not stop_event.is_set():\r\n for currency in currency_list:\r\n res = requests.get(currency)\r\n try:\r\n res.raise_for_status()\r\n except requests.exceptions.HTTPError as e:\r\n # Not 200\r\n logger.error(str(e))\r\n continue\r\n\r\n # Get the end characters to dertermine the type e.g. 
btceur, ethusd, etc...\r\n currency_type = (currency.rpartition('/')[-1])\r\n logger.info('The Curreny type: ' + currency_type)\r\n\r\n if currency_type == 'btceur':\r\n table = 'btceur'\r\n elif currency_type == 'btcusd':\r\n table = 'btcusd'\r\n elif currency_type == 'ethusd':\r\n table = 'ethusd'\r\n elif currency_type == 'etheur':\r\n table = 'etheur'\r\n else:\r\n table = None\r\n\r\n # Extract Data and Fields\r\n data = res.json()\r\n field_list = data.keys()\r\n logger.info(field_list)\r\n value_list = data.values()\r\n logger.info(value_list)\r\n\r\n # Write to DB\r\n db_commit(table, field_list, value_list)\r\n # Cannot make more than 600 requests per 10 minutes or they will ban your IP address.\r\n # Will in time get real time using their websocket API.\r\n time.sleep(5)", "def get_data(host, query, idx, limit, debug, threshold=300, ckey=None,\n cert=None, das_headers=True):\n params = {'input':query, 'idx':idx, 'limit':limit}\n path = '/das/cache'\n pat = re.compile('http[s]{0,1}://')\n if not pat.match(host):\n msg = 'Invalid hostname: %s' % host\n raise Exception(msg)\n url = host + path\n headers = {\"Accept\": \"application/json\", \"User-Agent\": DAS_CLIENT}\n encoded_data = urllib.urlencode(params, doseq=True)\n url += '?%s' % encoded_data\n req = urllib2.Request(url=url, headers=headers)\n if ckey and cert:\n ckey = fullpath(ckey)\n cert = fullpath(cert)\n http_hdlr = HTTPSClientAuthHandler(ckey, cert, debug)\n else:\n http_hdlr = urllib2.HTTPHandler(debuglevel=debug)\n proxy_handler = urllib2.ProxyHandler({})\n cookie_jar = cookielib.CookieJar()\n cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)\n opener = urllib2.build_opener(http_hdlr, proxy_handler, cookie_handler)\n fdesc = opener.open(req)\n data = fdesc.read()\n fdesc.close()\n\n pat = re.compile(r'^[a-z0-9]{32}')\n if data and isinstance(data, str) and pat.match(data) and len(data) == 32:\n pid = data\n else:\n pid = None\n iwtime = 2 # initial waiting time in seconds\n wtime = 20 # final waiting time in seconds\n sleep = iwtime\n time0 = time.time()\n while pid:\n params.update({'pid':data})\n encoded_data = urllib.urlencode(params, doseq=True)\n url = host + path + '?%s' % encoded_data\n req = urllib2.Request(url=url, headers=headers)\n try:\n fdesc = opener.open(req)\n data = fdesc.read()\n fdesc.close()\n except urllib2.HTTPError as err:\n return {\"status\":\"fail\", \"reason\":str(err)}\n if data and isinstance(data, str) and pat.match(data) and len(data) == 32:\n pid = data\n else:\n pid = None\n time.sleep(sleep)\n if sleep < wtime:\n sleep *= 2\n elif sleep == wtime:\n sleep = iwtime # start new cycle\n else:\n sleep = wtime\n if (time.time()-time0) > threshold:\n reason = \"client timeout after %s sec\" % int(time.time()-time0)\n return {\"status\":\"fail\", \"reason\":reason}\n jsondict = json.loads(data)\n return jsondict", "def bleep(url):\n print(url)\n r = requests.get(url)\n\n try:\n next_page = r.links[\"next\"][\"url\"]\n except KeyError:\n next_page = None\n\n print(\"r.status_code\", r.status_code)\n print(\"X-Ratelimit-Limit\", r.headers[\"X-Ratelimit-Limit\"])\n print(\"X-Ratelimit-Remaining\", r.headers[\"X-Ratelimit-Remaining\"])\n\n if r.status_code == 200:\n return r.json(), next_page\n\n return None, None", "def sendReadings():\n # Get parameters\n\n # Time example: 2013-04-11T20:46:54.892+08:00\n try:\n sincedate = parser.parse(request.query.get('sincedate'))\n except ValueError:\n sincedate = None\n\n try:\n tilldate = parser.parse(request.query.get('tilldate'))\n except 
ValueError:\n tilldate = None\n\n try:\n limit = int(request.query.get('limit'))\n except ValueError:\n limit = None\n if limit is None:\n limit = 10000\n\n # Get results and format to match native JSON\n results = readings.find({\"date\": {\"$gt\": sincedate, \"$lte\": tilldate}}).sort([(\"date\", 1)]).limit(limit)\n outresult = json.loads(dumps(results))\n for i in range(len(outresult)):\n outresult[i]['date'] = datetime.datetime.utcfromtimestamp(float(outresult[i]['date']['$date']/1000.0)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n outresult[i]['_id'] = outresult[i]['_id']['$oid']\n\n return {\"result\": \"OK\", \"readings\": outresult}", "def try_query(pid):\n retries = 1\n while True:\n try:\n query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n return query\n except HTTPError as e:\n if retries > 10:\n raise e\n print(e)\n wait = retries * 15\n time.sleep(wait)\n retries += 1", "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df", "def retrieve_report(resource, url, key):\n # TODO: manage time\n params = {\"apikey\": key, \"resource\": resource}\n res = requests.post(url, data=params)\n\n while res.status_code == 204 or json.loads(res.text)[\"response_code\"] == -2:\n time.sleep(15)\n res = requests.post(url, data=params)\n\n return res", "def rate_limiter(rl_params):\n # Please respect the parties providing these free api's to us and do not modify this code.\n # If I suspect any abuse I will revoke all api keys and require all users\n # to have a personal api key for all services.\n # Thank you\n if not rl_params:\n return\n monitor = xbmc.Monitor()\n win = xbmcgui.Window(10000)\n rl_name = rl_params[0]\n rl_delay = rl_params[1]\n cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n prev_timestamp = try_parse_int(win.getProperty(\"ratelimiter.%s\" % rl_name))\n if (prev_timestamp + rl_delay) > cur_timestamp:\n sec_to_wait = (prev_timestamp + rl_delay) - cur_timestamp\n log_msg(\n \"Rate limiter active for %s - delaying request with %s seconds - \"\n \"Configure a personal API key in the settings to get rid of this message and the delay.\" %\n (rl_name, sec_to_wait), xbmc.LOGNOTICE)\n while sec_to_wait and not monitor.abortRequested():\n monitor.waitForAbort(1)\n # keep setting the 
timestamp to create some sort of queue\n cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n win.setProperty(\"ratelimiter.%s\" % rl_name, \"%s\" % cur_timestamp)\n sec_to_wait -= 1\n # always set the timestamp\n cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n win.setProperty(\"ratelimiter.%s\" % rl_name, \"%s\" % cur_timestamp)\n del monitor\n del win", "def get_data(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n # Convert dates to datetime objects so deltas can be calculated\n begin_datetime = parse_known_date_formats(begin_date)\n end_datetime = parse_known_date_formats(end_date)\n delta = end_datetime - begin_datetime\n\n # If the length of our data request is less or equal to 31 days,\n # we can pull the data from API in one request\n if delta.days <= 31:\n data_url = build_query_url(\n begin_datetime.strftime(\"%Y%m%d %H:%M\"),\n end_datetime.strftime(\"%Y%m%d %H:%M\"),\n stationid, product, datum, bin_num, interval, units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is less than 365 days\n # AND the product is hourly_height or high_low, we can pull data directly\n # from the API in one request\n elif delta.days <= 365 and (\n product == 'hourly_height' or product == 'high_low'):\n data_url = build_query_url(\n begin_date, end_date, stationid, product, datum, bin_num, interval,\n units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is greater than 365 days\n # AND the product is hourly_height or high_low, we need to load data from\n # the API in365 day blocks.\n elif product == 'hourly_height' or product == 'high_low':\n # Find the number of 365 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_365day_blocks = int(math.floor(delta.days / 365))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 365 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_365day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))\n end_datetime_loop = begin_datetime_loop + timedelta(days=365)\n\n # If end_datetime_loop of the current 365 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build url for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # If the length of the user specified data request is greater than 31 days\n # for any other products, we need to load data from the API in 31 day\n # blocks\n else:\n # Find the number of 31 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_31day_blocks = int(math.floor(delta.days / 31))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 31 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n 
for i in range(num_31day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))\n end_datetime_loop = begin_datetime_loop + timedelta(days=31)\n\n # If end_datetime_loop of the current 31 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build URL for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # Rename output dataframe columns based on requested product\n # and convert to useable data types\n if product == 'water_level':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'QC', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'hourly_height':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'high_low':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'ty': 'high_low',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Separate to high and low dataframes\n df_HH = df[df['high_low'] == \"HH\"].copy()\n df_HH.rename(columns={'date_time': 'date_time_HH',\n 'water_level': 'HH_water_level'},\n inplace=True)\n\n df_H = df[df['high_low'] == \"H \"].copy()\n df_H.rename(columns={'date_time': 'date_time_H',\n 'water_level': 'H_water_level'},\n inplace=True)\n\n df_L = df[df['high_low'].str.contains(\"L \")].copy()\n df_L.rename(columns={'date_time': 'date_time_L',\n 'water_level': 'L_water_level'},\n inplace=True)\n\n df_LL = df[df['high_low'].str.contains(\"LL\")].copy()\n df_LL.rename(columns={'date_time': 'date_time_LL',\n 'water_level': 'LL_water_level'},\n inplace=True)\n\n # Extract dates (without time) for each entry\n dates_HH = [x.date() for x in pd.to_datetime(df_HH['date_time_HH'])]\n dates_H = [x.date() for x in pd.to_datetime(df_H['date_time_H'])]\n dates_L = [x.date() for x in pd.to_datetime(df_L['date_time_L'])]\n dates_LL = [x.date() for x in pd.to_datetime(df_LL['date_time_LL'])]\n\n # Set indices to datetime\n df_HH['date_time'] = dates_HH\n df_HH.index = df_HH['date_time']\n df_H['date_time'] = dates_H\n df_H.index = df_H['date_time']\n df_L['date_time'] = dates_L\n df_L.index = df_L['date_time']\n df_LL['date_time'] = dates_LL\n df_LL.index = df_LL['date_time']\n\n # Remove flags and combine to single dataframe\n df_HH = df_HH.drop(\n columns=['flags', 'high_low'])\n df_H = df_H.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_L = df_L.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_LL = df_LL.drop(columns=['flags', 'high_low',\n 'date_time'])\n\n # Keep 
only one instance per date (based on max/min)\n maxes = df_HH.groupby(df_HH.index).HH_water_level.transform(max)\n df_HH = df_HH.loc[df_HH.HH_water_level == maxes]\n maxes = df_H.groupby(df_H.index).H_water_level.transform(max)\n df_H = df_H.loc[df_H.H_water_level == maxes]\n mins = df_L.groupby(df_L.index).L_water_level.transform(max)\n df_L = df_L.loc[df_L.L_water_level == mins]\n mins = df_LL.groupby(df_LL.index).LL_water_level.transform(max)\n df_LL = df_LL.loc[df_LL.LL_water_level == mins]\n\n df = df_HH.join(df_H, how='outer')\n df = df.join(df_L, how='outer')\n df = df.join(df_LL, how='outer')\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(\n ['date_time', 'date_time_HH', 'date_time_H', 'date_time_L',\n 'date_time_LL'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df.index)\n df['date_time_HH'] = pd.to_datetime(df['date_time_HH'])\n df['date_time_H'] = pd.to_datetime(df['date_time_H'])\n df['date_time_L'] = pd.to_datetime(df['date_time_L'])\n df['date_time_LL'] = pd.to_datetime(df['date_time_LL'])\n\n elif product == 'predictions':\n if interval == 'h':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n\n elif interval == 'hilo':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl',\n 'type': 'hi_lo'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'hi_lo'])\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'currents':\n # Rename columns for clarity\n df.rename(columns={'b': 'bin', 'd': 'direction',\n 's': 'speed', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'wind':\n # Rename columns for clarity\n df.rename(columns={'d': 'dir', 'dr': 'compass',\n 'f': 'flags', 'g': 'gust_spd',\n 's': 'spd', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags', 'compass'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_pressure':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_press'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = 
pd.to_datetime(df['date_time'])\n\n elif product == 'water_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'water_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n # Set datetime to index (for use in resampling)\n df.index = df['date_time']\n df = df.drop(columns=['date_time'])\n\n # Handle hourly requests for water_level and currents data\n if (product == 'water_level') | (product == 'currents') & (\n interval == 'h'):\n df = df.resample('H').first() # Only return the hourly data\n\n return df", "def make_data_pipeline(self, from_, to_):\n\n # Get the volume over time data.\n r_volume = self.get_data_from_endpoint(from_, to_, 'volume')\n print('There are approximately {} documents.'.format(r_volume.json()['numberOfDocuments']))\n\n # Carve up time into buckets of volume <10k.\n l_dates = self.get_dates_from_timespan(r_volume)\n\n data = []\n for i in range(0, len(l_dates)):\n from_, to_ = l_dates[i]\n\n # Pull posts.\n r_posts = self.get_data_from_endpoint(from_, to_, 'posts')\n if r_posts.ok and (r_posts.json()['status'] != 'error'):\n j_result = json.loads(r_posts.content.decode('utf8'))\n data.extend(j_result['posts'])\n return data", "def collect_events(helper, ew):\n\n opt_start_time_start = helper.get_arg('start_time_start')\n opt_endpoints = helper.get_arg('endpoints')\n opt_interval = int(helper.get_arg('interval'))\n opt_live = False\n\n proxy = helper.get_proxy()\n if proxy:\n proxy_auth = \"{}:{}\".format(\n proxy['proxy_username'], proxy['proxy_password'])\n proxies = {\n \"https\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port']),\n \"http\": \"{protocol}://{auth}@{host}:{port}/\".format(protocol=proxy['proxy_type'], auth=proxy, host=proxy['proxy_url'], port=proxy['proxy_port'])\n }\n else:\n proxies = None\n\n helper.log_debug(\n \"[-] webex password_type: {}\".format(helper.get_global_setting(\"password_type\")))\n\n params = {\"opt_username\": helper.get_global_setting(\"username\"),\n \"opt_password\": helper.get_global_setting(\"password\"),\n \"opt_site_name\": helper.get_global_setting(\"site_name\"),\n \"limit\": 500,\n \"timezone\": \"20\",\n # \"password_type\": authentication_type[\"Password Authentication\"],\n # \"password_type\": authentication_type[\"OAuth\"],\n \"password_type\": authentication_type[helper.get_global_setting(\"password_type\")],\n \"client_id\": helper.get_global_setting(\"client_id\"),\n \"client_secret\": helper.get_global_setting(\"client_secret\"),\n \"refresh_token\": helper.get_global_setting(\"refresh_token\"),\n \"proxies\": proxies}\n\n # Historical Data\n helper.log_debug(\"Historical Data\")\n for opt_endpoint in opt_endpoints:\n helper.log_debug(\"[-] \\t At {}\".format(opt_endpoint))\n\n # endtime is midnight of GMT - 3days\n enddt = datetime.utcnow().date() - timedelta(3)\n end_time = datetime.combine(\n enddt, datetime.max.time()).strftime('%m/%d/%Y %H:%M:%S')\n\n # create checkpoint key for offest and timestamp\n timestamp_key = \"timestamp_{}_{}_processing\".format(\n helper.get_input_stanza_names(), opt_endpoint)\n\n start_time = helper.get_check_point(timestamp_key)\n if start_time is None:\n # if it's the 1st time, get the 
start_time from UI, and then save it in checkpoint\n start_time = opt_start_time_start\n helper.save_check_point(timestamp_key, start_time)\n else:\n # shift the start_time by 1 second\n start_time = (datetime.strptime(start_time, '%m/%d/%Y %H:%M:%S') +\n timedelta(seconds=1)).strftime('%m/%d/%Y %H:%M:%S')\n\n helper.log_debug(\"Start time: {}\".format(start_time))\n helper.log_debug(\"End time: {}\".format(end_time))\n\n # Update Parameters\n params.update({\"mode\": \"historical\"})\n params.update({\"opt_endpoint\": opt_endpoint})\n params.update({\"start_time\": start_time})\n params.update({\"end_time\": end_time})\n params.update({\"timestamp_key\": timestamp_key})\n\n records = params['limit']\n offset = 1\n while (records == params['limit']):\n helper.log_debug(\"current_offset: {}\".format(offset))\n params['offset'] = offset\n records = fetch_webex_logs(ew, helper, params)\n helper.log_debug(\"\\t Offet:{}\\tLimit: {}\\tRecords Returned: {}\".format(\n offset, params['limit'], records))\n if records:\n offset += records" ]
[ "0.5955437", "0.57257915", "0.56286526", "0.5615483", "0.5610118", "0.55875003", "0.5550483", "0.5543635", "0.5483358", "0.544748", "0.54263824", "0.5417483", "0.54158425", "0.5409253", "0.53897715", "0.53879756", "0.5376069", "0.5351717", "0.5316987", "0.53047615", "0.52869844", "0.52864397", "0.5284065", "0.52826166", "0.5266588", "0.525442", "0.5237773", "0.51919675", "0.5183441", "0.5182107" ]
0.6245338
0
returns an n period exponential moving average for the time series s s is a list ordered from oldest (index 0) to most recent (index -1) n is an integer returns a numeric array of the exponential moving average
def ema(s, n):
    ema = []
    j = 1

    #get n sma first and calculate the next n period ema
    sma = sum(s[:n]) / n
    multiplier = 2 / float(1 + n)
    ema.append(sma)

    #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)
    ema.append(( (s[n] - sma) * multiplier) + sma)

    #now calculate the rest of the values
    for i in s[n+1:]:
        tmp = ( (i - ema[j]) * multiplier) + ema[j]
        j = j + 1
        ema.append(tmp)

    return ema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moving_average(a, n: int = 3) -> np.array:\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def moving_average(a, n=5):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n-1:] *= 1 / n\n ret[:n-1] *= 1 / np.arange(1, n)\n return ret", "def ema(self, s, n):\n s = np.array(s).astype(float)\n ema = []\n j = 1\n\n # get n sma first and calculate the next n period ema\n sma = sum(s[:n]) / n\n multiplier = 2 / float(1 + n)\n ema[:0] = [sma] * n\n\n # EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\n ema.append(( (s[n] - sma) * multiplier) + sma)\n\n # now calculate the rest of the values\n for i in s[n + 1:]:\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\n ema.append(tmp)\n j = j + 1\n\n # print \"ema length = \" + str(len(ema))\n return ema", "def moving_average(a, n=3) :\r\n a = a.ravel()\r\n a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values\r\n ret = np.cumsum(a, dtype = float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n ret=ret[n - 1:] / n\r\n return ret", "def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def moving_average(self, a, n=3):\n ret = np.nancumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def simple_moving_average(n, data):\n result = []\n for m in range(n-1, len(data)):\n total = sum([data[m-i] for i in range(n)])\n result.append(total/n)\n return result", "def moving_average(sig, n=100):\n window = deque(maxlen=n) # last n scores\n sig_ma = []\n for i in range(len(sig)):\n window.append(sig[i])\n sig_ma.append(np.mean(window))\n return sig_ma", "def add_simple_moving_average(smas, n, data):\n total = sum([data[-1-i] for i in range(n)])\n smas.append(total/n)", "def moving_average_forecast(series, window_size):\n\tforecast= []\n\tfor time in range(len(series)- window_size):\n\t\tforecast.append(series[time:time + window_size].mean())\n\treturn np.array(forecast)", "def SMA(serie, n):\r\n\r\n return serie.rolling(window=n).mean()", "def EMA(serie, n):\r\n\r\n ewm = serie.ewm(n, adjust=False).mean()\r\n ewm[0:n] = [np.nan]*n\r\n return ewm", "def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n smoothing_factor = 2/(num_days+1)\n EMA_prev = 0.0\n for idx in range(len(temp)):\n EMA_current = (temp[idx]*smoothing_factor)+EMA_prev*(1-smoothing_factor)\n # update values for next iteration\n temp[idx] = EMA_current\n EMA_prev = EMA_current \n return temp", "def exp_series(x, n):\n return sum((exp_term(x, i) for i in xrange(0, n)))", "def sma(y, n):\n N = len(y) - n\n if n < 0:\n raise ValueError(\"Input doesn't contain enough data for moving average.\")\n\n out = [y[i:i+n].mean() for i in range(len(y) - n)]\n out = np.array(out)\n\n return out", "def ema(self, candles, n):\n\n\t s = [] # array(s)\n\t for candle in candles:\n\t \ts.append(candle.close)\n\n\t ema = []\n\t j = 1\n\n\t #get n sma first and calculate the next n period ema\n\t sma = sum(s[:n]) / n\n\t multiplier = 2 / float(1 + n)\n\t ema.append(sma)\n\n\t #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\n\t ema.append(( (s[n] - sma) * multiplier) + sma)\n\n\t #now calculate the rest of the values\n\t for i in s[n+1:]:\n\t tmp = ( (i - ema[j]) * multiplier) + ema[j]\n\t j = j + 1\n\t ema.append(tmp)\n\n\t return ema", "def moving_average(x, n, type='simple'):\n x = np.asarray(x)\n if type == 'simple':\n weights = 
np.ones(n)\n else:\n weights = np.exp(np.linspace(-1., 0., n))\n\n weights /= weights.sum()\n\n a = np.convolve(x, weights, mode='full')[:len(x)]\n a[:n] = a[n]\n return a", "def EMA_history(n_periods, values):\n\n ema = [values[0]]\n for i in range(1,len(values)):\n ema.append( EMA_tick(n_periods, values[i], ema[i-1]) )\n\n return ema", "def daymean(x):\n return nstepf(x, 24, func='mean')", "def ExpMovingAverage(values, window):\n weights = np.exp(np.linspace(-1.0, 0.0, window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode=\"full\")[: len(values)]\n a[:window] = a[window]\n return a", "def EMA_tick(n_periods, current_value, previous_ema):\n\n most_recent_weight = 2 / (n_periods + 1)\n return (current_value - previous_ema) * most_recent_weight + previous_ema", "def __ExpMovingAverage(self, values, window):\n weights = np.exp(np.linspace(-1., 0., window))\n weights /= weights.sum()\n a = np.convolve(values, weights, mode='full')[:len(values)]\n a[:window] = a[window]\n return a", "def mean(series):\n return fsum(series) / len(series)", "def moving_average(iterable, n):\n it = iter(iterable)\n d = collections.deque(itertools.islice(it, n-1))\n d.appendleft(0)\n s = sum(d)\n for elem in it:\n s += elem - d.popleft()\n d.append(elem)\n yield s / float(n)", "def calcExpMovAvg(self, windowLength, smoothingFactor):\n\n if smoothingFactor < 0 or smoothingFactor > 1:\n raise ValueError(\n \"Value of smoothing factor should be in between 0-1\")\n\n EMA_prev = sum(self.x[:windowLength])/windowLength\n n = len(self.x)\n\n EMA = []\n\n for i in range(n):\n if i >= windowLength:\n temp = smoothingFactor*self.x[i] + (1-smoothingFactor)*EMA_prev\n EMA.append(temp)\n EMA_prev = temp\n return [None]*windowLength + EMA", "def moving_average(data, beta):\n avg = 0\n maverages = []\n for i in range(len(data)):\n avg = avg * beta + (1 - beta) * data[i]\n maverages.append(avg / (1 - (beta ** (i + 1))))\n return maverages", "def _EMA(vec, win):\n\n # assert win>0 and win<len(vec), \"the size of EMA window is not allowed\"\n alpha = 2/(1+win)\n if win == 1 or len(vec) == 1:\n k = 1\n else: # why we need this?? -- to save memory using approximately \n err = 0.000001\n k = np.ceil(np.log(err) / np.log(1-alpha))\n\n N = int(min(len(vec), k))\n ema_series = [0 for _ in range(N)]\n ema_series[0] = vec[0]\n for i in range(1,N):\n ema_series[i] = alpha*vec[i] + (1-alpha)*ema_series[i-1]\n return ema_series", "def EMA(self, n=SHORT_TERM_PERIOD):\n prices = self.df.close\n\n ema = prices.ewm(span=n, adjust=False).mean()\n\n self.df[\"ema\"] = ema\n\n return ema", "def exponentialMovingAverage(self,new,old, alpha):\n return alpha * new + (1 - alpha)* old", "def moving_averages(ts_init, window):\n ts_init = pd.Series(ts_init)\n if len(ts_init) % 2 == 0:\n ts_ma = ts_init.rolling(window, center=True).mean()\n ts_ma = ts_ma.rolling(window=2, center=True).mean()\n ts_ma = np.roll(ts_ma, -1)\n else:\n ts_ma = ts_init.rolling(window, center=True).mean()\n return ts_ma" ]
[ "0.74028605", "0.7296576", "0.72747", "0.7268313", "0.72586125", "0.71004415", "0.7091385", "0.7057606", "0.70309013", "0.69481266", "0.69246125", "0.6874147", "0.67940295", "0.6785554", "0.6768282", "0.6737907", "0.6717414", "0.6709948", "0.6671454", "0.66080034", "0.65956146", "0.6540976", "0.6488514", "0.6481339", "0.6421986", "0.64001065", "0.63762283", "0.6369387", "0.63547015", "0.6352273" ]
0.7348136
1
Update the snapshot so it'll only have the fields described in config.
def filter_snapshot(snap, config): for field in ALL_FIELDS: if field not in config: snap.ClearField(field)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def snapshot(self):\n pass", "def serialize_snapshot(self, snapshot, fields=None, version=None):\n fields = fields or self.snapshot_fields\n version = version or self.snapshot_version\n serialized_snapshot = serializers.serialize(\n 'python', [snapshot], fields=fields\n )[0]\n serialized_snapshot['version'] = version\n serialized_snapshot['extra_fields'] = {}\n return serialized_snapshot", "def deserialize_snapshot(self, serialized_snapshot):\n snapshot = list(serializers.deserialize(\n 'python', [serialized_snapshot]\n ))[0].object\n snapshot.__version__ = serialized_snapshot['version']\n snapshot.__extra_fields__ = serialized_snapshot['extra_fields']\n # override extra fields\n for name, value in serialized_snapshot['extra_fields'].items():\n if value:\n if isinstance(value, dict):\n value = self.deserialize_snapshot(value)\n setattr(snapshot, name, value)\n return snapshot", "def snapshot(self):\n self._client.snapshot()", "def snapshot(self):\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)", "def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot", "def update_json(self):\n self.set_version_to_default()\n self.remove_null_fields()\n self.remove_unnecessary_keys()\n self.set_fromVersion(from_version=self.from_version)", "def snapshot(self):\n if not self.attached:\n raise CastleCollectionNotAttachedException()\n try:\n old_v = castle_collection_snapshot(self.castle.conn, self.coll_id)\n #once we are capable of updating the current_version after a snapshot, the following assertion will apply\n #if old_v != self.version_id:\n # raise Exception\n self.version_id = old_v #this is technically wrong, but is less wrong than not updating it at all\n pycastle_log.info(\"Snapshotting collection {0} (coll_id={1}, version_id={2})\".format(self.name, self.coll_id, self.version_id))\n except Exception, e:\n pycastle_log.error(str(self)+\" got exception {0}:{1}\".format(type(e), e))\n raise", "def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)", "def take_snapshot(self):\r\n self.snapshot = self.gain, self.block, self.locked, self.bucket_num", "def get_params_snapshot(self):\n ...", "def snapshot(self, instance, name):\n # TODO(imsplitbit): Need to implement vzdump\n pass", "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.gain = self.snapshot[0]\r\n self.block = self.snapshot[1]\r\n self.locked = self.snapshot[2]\r\n self.bucket_num = self.snapshot[3]", "def update(self):\n self.save_config_file()", "def configuration_snapshot(self) -> bool:\n return pulumi.get(self, \"configuration_snapshot\")", "def configuration_snapshot(self) -> bool:\n return pulumi.get(self, \"configuration_snapshot\")", "def conf_update(self):\n pass", "def snapshot_info(self) -> MetaFile:\n raise NotImplementedError", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def test_partial_update_metadata(self):\n pass", "def _pre_snapshot_hook(self):\n\n # Add all current networks to the dirty set, so that we 
will stop their\n # Dnsmasqs if no longer needed. Also remove all port and subnet\n # information.\n LOG.debug(\"Reset cache for new snapshot\")\n for network_id in list(self.agent.cache.get_network_ids()):\n self.dirty_networks.add(network_id)\n _fix_network_cache_port_lookup(self.agent, network_id)\n self.agent.cache.put(empty_network(network_id))\n\n # Suppress Dnsmasq updates until we've processed the whole snapshot.\n self.suppress_dnsmasq_updates = True\n return None", "def snapshot(self):\n net = self.solver.net\n \n if cfg.TRAIN.BBOX_REG:\n orig_0 = []; orig_1 = [];\n # save original values\n for i, bbox_pred_param_name in enumerate(cfg.TRAIN.BBOX_PRED_PARAM_NAMES):\n print 'adjusting {} parameters'.format(bbox_pred_param_name)\n orig_0.append(net.params[bbox_pred_param_name][0].data.copy())\n orig_1.append(net.params[bbox_pred_param_name][1].data.copy())\n\n # scale and shift with bbox reg unnormalization; then save snapshot\n for i, bbox_pred_param_name in enumerate(cfg.TRAIN.BBOX_PRED_PARAM_NAMES):\n net.params[bbox_pred_param_name][0].data[...] = \\\n (net.params[bbox_pred_param_name][0].data *\n self.bbox_stds[:, np.newaxis])\n net.params[bbox_pred_param_name][1].data[...] = \\\n (net.params[bbox_pred_param_name][1].data *\n self.bbox_stds + self.bbox_means)\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX\n if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')\n filename = (self.solver_param.snapshot_prefix + infix +\n '_iter_{:d}'.format(self.solver.iter) + '.caffemodel')\n filename = os.path.join(self.output_dir, filename)\n\n net.save(str(filename))\n print 'Wrote snapshot to: {:s}'.format(filename)\n\n if cfg.TRAIN.BBOX_REG:\n # restore net to original state\n for i, bbox_pred_param_name in enumerate(cfg.TRAIN.BBOX_PRED_PARAM_NAMES):\n net.params[bbox_pred_param_name][0].data[...] = orig_0[i]\n net.params[bbox_pred_param_name][1].data[...] 
= orig_1[i]", "def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.max_gain = self.snapshot[0]\r\n self.array = self.snapshot[1]\r\n self.free_cell_list = self.snapshot[2]", "def _instrument_config_dirty(self):\n # Refresh the param dict cache\n\n self._update_params()\n\n startup_params = self._param_dict.get_startup_list()\n log.debug(\"Startup Parameters: %s\", startup_params)\n\n for param in startup_params:\n if (self._param_dict.get(param) != self._param_dict.get_config_value(param)):\n log.debug(\"DIRTY: %s %s != %s\", param, self._param_dict.get(param), self._param_dict.get_config_value(param))\n return True\n\n log.debug(\"Clean instrument config\")\n return False", "def edit_snapshot(self) -> Generator[Snapshot, None, None]:\n with self.edit(Snapshot.type) as snapshot:\n if not isinstance(snapshot, Snapshot):\n raise RuntimeError(\"Unexpected snapshot type\")\n yield snapshot", "def merge(self, snapshot):\n\n if not self.__settings:\n return\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot)\n self._merge_synthetics_events(snapshot)\n self._merge_error_events(snapshot)\n self._merge_error_traces(snapshot)\n self._merge_custom_events(snapshot)\n self._merge_span_events(snapshot)\n self._merge_sql(snapshot)\n self._merge_traces(snapshot)", "def take_snapshot(self):\r\n self.snapshot = self.name, self.size, copy.copy(self.cells)\r\n self.bucket_array.take_snapshot()", "def take_snapshot(self):\r\n self.snapshot = self.blockA, self.blockB, self.blockA_locked, self.blockB_locked, self.blockA_free, \\\r\n self.blockB_free, copy.copy(self.blockA_cells), copy.copy(self.blockB_cells), self.cut", "def _post_submit(self):\n if not isinstance(self._snapshots, bool):\n self.snapshot(self._snapshots)\n\n self.update(True)", "def update(self):\n if self._var_id == UTILISATION_MONITOR_VERSION:\n version = dockerVersion(self._api)\n self._state = version.get('version', None)\n self._attributes['api_version'] = version.get('api_version', None)\n self._attributes['os'] = version.get('os', None)\n self._attributes['arch'] = version.get('arch', None)" ]
[ "0.62075865", "0.5945219", "0.5891311", "0.55987465", "0.55950725", "0.55518705", "0.5527521", "0.54823714", "0.5481986", "0.546261", "0.54572856", "0.54442203", "0.54154104", "0.53831637", "0.53712493", "0.53712493", "0.5336485", "0.5313484", "0.53123206", "0.52995414", "0.5268766", "0.5261925", "0.5255823", "0.52432454", "0.52309275", "0.52203107", "0.51944196", "0.51892155", "0.51866364", "0.5162138" ]
0.6623801
0
Upload the sample from the file to the server, using the hello>config>snapshot protocol.
def upload_sample(host, port, path, read_type='protobuf'): with Connection.connect(host, port) as conn: zipped = (path.endswith(".gz")) r = Reader(path, read_type, zipped) hello = r.read_hello() hello_bytes = hello.SerializeToString() num_snapshot = 1 for snap in r: send_hello(conn, hello_bytes) config_bytes = get_config(conn) config = Config.deserialize(config_bytes) filter_snapshot(snap, config) send_snapshot(conn, snap.SerializeToString()) num_snapshot += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_samples(args):\n clarity_epp.upload.samples.from_helix(lims, config.email, args.input_file)", "def upload_sample(self, file):\n return self._upload_sample(file)", "def upload_samples():\n # Retrieve a list of all files and paths within the target\n paths = Path(Config.target_dir).glob(Config.target_pattern)\n # Inform the user as to what we're doing\n logger.info(\"Assembling %s volume for submission\", Config.target_dir)\n # Loop through each identified file and upload it to the sandbox for analysis\n for path in paths:\n # Convert the path to a string\n filepath = str(path)\n # Grab the file name\n filename = os.path.basename(filepath)\n # Open the file for binary read, this will be our payload\n with open(filepath, 'rb') as upload_file:\n payload = upload_file.read()\n # Upload the file using the Sandbox\n response = Samples.upload_sample(file_name=filename, sample=payload)\n # Grab the SHA256 unique identifier for the file we just uploaded\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the volume payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we can remove the file when we're done\n Analyzer.files.append([filename, filepath, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)", "def upload(self):\n # TODO: Should CD to the working directory set by the robscript.\n src = self.state_frame[0]\n dest = self.state_frame[1]\n self.send_upload(src, dest, True, None)\n self.state = STATE_READ_LINE", "def sample(config, samples):\n url = get_api_path('sample.json')\n multiple_files = []\n images = [s['image'] for s in samples]\n labels = [s['label'] for s in samples]\n for image in images:\n multiple_files.append(('images', (image, open(image, 'rb'), 'image/png')))\n headers=get_headers(no_content_type=True)\n headers[\"config\"]= json.dumps(config, cls=HCEncoder)\n headers[\"labels\"]= json.dumps(labels)\n\n try:\n r = requests.post(url, files=multiple_files, headers=headers, timeout=30)\n return r.text\n except requests.exceptions.RequestException:\n e = sys.exc_info()[0]\n print(\"Error while calling hyperchamber - \", e)\n return None", "def sample_file(self, sample_file: str):\n\n self._sample_file = sample_file", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def post(self):\n filename = str(time.time())\n filepath = os.path.join(\n os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n with open(filepath, 'bw') as uploadfile:\n chunk_size = 1024\n while True:\n chunk = request.stream.read(chunk_size)\n if len(chunk) == 0:\n break\n uploadfile.write(chunk)\n current_app.logger.info('file %s upload successfully', filename)\n return {'timestamp': filename}, http.HTTPStatus.CREATED", "def upload_sample(a1000):\n file_entry = demisto.getFilePath(demisto.getArg('entryId'))\n\n try:\n with open(file_entry['path'], 'rb') as f:\n response_json = a1000.upload_sample_from_file(f,\n custom_filename=file_entry.get('name'),\n tags=demisto.getArg('tags'),\n comment=demisto.getArg('comment')).json()\n except Exception as e:\n return_error(str(e))\n\n markdown = f'''## ReversingLabs A1000 upload sample\\n **Message:** {response_json.get('message')}\n **ID:** {demisto.get(response_json, 'detail.id')}\n **SHA1:** 
{demisto.get(response_json, 'detail.sha1')}\n **Created:** {demisto.get(response_json, 'detail.created')}'''\n\n command_result = CommandResults(\n outputs_prefix='ReversingLabs',\n outputs={'a1000_upload_report': response_json},\n readable_output=markdown\n )\n\n file_result = fileResult('Upload sample report file', json.dumps(response_json, indent=4),\n file_type=EntryType.ENTRY_INFO_FILE)\n\n return [command_result, file_result]", "def upload_file(name):\n subprocess.check_output(cmd_preamble + [\"cp\", name, f\"jot://{name}\"])", "def test_send_second_file():\n\n # Generate the blocks for the test file which is not present on the server\n test_file = os.path.join(os.path.dirname(__file__),\n \"../test_files/debashis-rc-biswas-3U4gGsGNsMY-unsplash.jpg\")\n # Ask the server for the hash of the last block\n response = client.get(\"/latest_block_hash\")\n last_block_hash = response.json()[\"last_block_hash\"]\n blocks = generate_blocks(test_file, last_block_hash)\n # Collect all blocks into a single binary file using pickle\n blocks_pickled = pickle.dumps(blocks)\n # Send the collected blocks in a single transfer to the test server\n response = client.post(\"/send\",\n files={\"file\": blocks_pickled})\n assert response.ok\n assert response.json() \\\n == {\"success\": True,\n \"new_file\": True,\n \"hash\": \"415d4f66e1b8b9083014dcdca5ddd7d1dcca3f5a4a120603169b951b1c5fa0c9\",\n \"index_all\": 1704}", "def example(self):\n self.add_file_string('Example file')\n self.should_copy = False", "def upload_bucket_samples():\n if not Config.region:\n logger.error(\"You must specify a region in order to scan a bucket target\")\n raise SystemExit(\n \"Target region not specified. Use -r or --region to specify the target region.\"\n )\n # Connect to S3 in our target region\n s_3 = boto3.resource(\"s3\", region_name=Config.region)\n # Connect to our target bucket\n bucket = s_3.Bucket(Config.target_dir)\n # Retrieve a list of all objects in the bucket\n summaries = bucket.objects.all()\n # Inform the user as this may take a minute\n logger.info(\"Assembling volume from target bucket (%s) for submission\", Config.target_dir)\n # Loop through our list of files, downloading each to memory then upload them to the Sandbox\n for item in summaries:\n # Grab the file name from the path\n filename = os.path.basename(item.key)\n # Teensy bit of witch-doctor magic to download the file\n # straight into the payload used for our upload to the Sandbox\n response = Samples.upload_sample(file_name=filename,\n file_data=io.BytesIO(\n bucket.Object(key=item.key).get()[\"Body\"].read()\n )\n )\n # Retrieve our uploaded file SHA256 identifier\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the upload payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we recognize the file when we're done\n Analyzer.files.append([filename, item.key, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)", "def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))", "async def test_upload_file(self):\n rsps = respx.put(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id/servers.dat') \\\n .mock(return_value=Response(204))\n with mock.patch('__main__.open', 
new=mock.mock_open(read_data='test')) as file:\n file.return_value = json.dumps('test').encode()\n await provisioning_client.upload_provisioning_profile_file('id', 'servers.dat', file())\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id/servers.dat'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'", "def copy_file_to_server():\r\n utils.system_output('mv /home/chronos/user/Downloads/* /usr/local/autotest/results/default/',ignore_status=True)\r\n logging.info(\"Video Copied to Log location\")", "def test_publish_path(self):\n\n self.create_sample_data_set_dir('node59p1_step1.dat', TELEM_DIR, \"node59p1.dat\")\n\n self.assert_initialize()\n\n try:\n # Verify we get one sample\n result = self.data_subscribers.get_samples(DataParticleType.CONTROL, 1)\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE, 1)\n result.extend(result1)\n log.info(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1.txt.result.yml')\n except Exception as e:\n log.error(\"Exception trapped: %s\", e)\n self.fail(\"Sample timeout.\")", "async def insert_file(self, file_name: FormalName, desired_copies: int, packets: int, size: int, fetch_prefix: FormalName):\n # send command interest\n file = File()\n file.file_name = file_name\n file.desired_copies = desired_copies\n file.packets = packets \n file.size = size\n fetch_path = FetchPath()\n fetch_path.prefix = fetch_prefix\n cmd = RepoCommand()\n cmd.file = file\n cmd.sequence_number = 0\n cmd.fetch_path = fetch_path\n cmd_bytes = cmd.encode()\n\n # publish msg to repo's insert topic\n await self.pb.wait_for_ready()\n print(Name.to_str(self.repo_prefix + ['insert']))\n is_success = await self.pb.publish(self.repo_prefix + ['insert'], cmd_bytes)\n if is_success:\n logging.info('Published an insert msg and was acknowledged by a subscriber')\n else:\n logging.info('Published an insert msg but was not acknowledged by a subscriber')\n return is_success", "def upload_file(self, file_path, file_name, output_path):", "def _call_init_upload(\n file_name, file_size, metadata, tags, project, samples_resource, sample_id, external_sample_id\n):\n upload_args = {\n \"filename\": file_name,\n \"size\": file_size,\n \"upload_type\": \"standard\", # this is multipart form data\n \"sample_id\": sample_id,\n \"external_sample_id\": external_sample_id,\n }\n\n upload_args.update(build_upload_dict(metadata, tags, project))\n\n try:\n return samples_resource.init_upload(upload_args)\n except requests.exceptions.HTTPError as e:\n raise_api_error(e.response, state=\"init\")\n except requests.exceptions.ConnectionError:\n raise_connectivity_error(file_name)", "def upload_file(command):\n if 'action' not in command or command['action']!=\"UPLOAD\":\n raise ValueError(\"Command not of type UPLOAD\")\n if 'file_pattern' not in command: \n raise ValueError(\"Missing file pattern\")\n path = command['file_pattern'] \n if not os.path.exists(path):\n raise ValueError(\"No valid file for upload found\")\n returner={}\n handler = Layer1(aws_access_key_id = command['access_key'],aws_secret_access_key = command['secret_access_key'],region_name=command['region_name'])\n uploader = ConcurrentUploader(handler,command['vault_name'],part_size=uchunk)\n file_size = os.path.getsize(path)\n if file_size==0:\n raise ValueError(\"File is empty. 
Nothing to upload.\")\n csum = chunkedmd5(path)\n itime=time.time()\n file_name = os.path.basename(path)\n machine_id = str(command['target']) if client_name == '' else client_name+' ('+str(command['target']) + ')'\n #Construct a meaningful description object for the file\n #The limits are that the description can be no more than 1024 characters in length and must use only ascii characters between 32 and 126 (i.e., 32<=ord(char)<=126)\n dscrip = command['description']+'\\\\n'\n dscrip = dscrip + \"Uploaded at \"+str(itime)+'\\\\n'+ \"Full path \"+str(path)+'\\\\n'+ \"File size \"+str(file_size)+'\\\\n' + \"MD5 \"+str(csum)+'\\\\n' + \"Source machine id \"+machine_id+'\\\\n'\n print \"Uploading file %s\"%file_name\n #Put some validation stuff here...\n #Do the upload\n archive_id = uploader.upload(path,dscrip)\n print \"Completed successfully. Archive ID: %s\"%archive_id\n #Done the upload, send the bastard back\n returner['archive_id'] = archive_id\n returner['description'] = dscrip\n returner['file_name'] = file_name\n returner['true_path'] = path\n returner['file_size'] = file_size\n returner['md5sum'] = csum\n returner['insert_time']=int(itime)\n returner['region_name']=command['region_name']\n returner['vault_name'] = command['vault_name']\n return returner", "def test_upload_binary(self):\n uploadFile = os.path.join(testdatadir, \"upload.data.gz\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED", "def test_send_positive():\n\n # Generate the blocks for the test file\n test_file = os.path.join(os.path.dirname(__file__),\n \"../test_files/isaac-martin-61d2hT57MAE-unsplash.jpg\")\n blocks = generate_blocks(test_file, '0')\n # Collect all blocks into a single binary file using pickle\n blocks_pickled = pickle.dumps(blocks)\n # Send the collected blocks in a single transfer to the test server\n response = client.post(\"/send\",\n files={\"file\": blocks_pickled})\n assert response.ok\n assert response.json() \\\n == {\"success\": True,\n \"new_file\": True,\n \"hash\": \"45f293033312d42815155e871f37b56b4de9b925c07d4a5f6262320c1627db12\",\n \"index_all\": 5285}", "def test_put_file_variant(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src, id, 'variant.tar.gz')\n path = '/'.join(backend.id_to_path(id)) + '/variant.tar.gz'\n self.assertTrue(backend.exists(path))", "def upload(self, data: dict, replace: bool = False):\n for sample_data in data[\"samples\"]:\n chanjo_sample = self.chanjo_api.sample(sample_data[\"sample\"])\n if chanjo_sample and replace:\n self.chanjo_api.delete_sample(sample_data[\"sample\"])\n elif chanjo_sample:\n LOG.warning(\"sample already loaded, skipping: %s\", sample_data[\"sample\"])\n continue\n\n LOG.debug(\"upload coverage for sample: %s\", sample_data[\"sample\"])\n self.chanjo_api.upload(\n sample_id=sample_data[\"sample\"],\n sample_name=sample_data[\"sample_name\"],\n group_id=data[\"family\"],\n group_name=data[\"family_name\"],\n bed_file=sample_data[\"coverage\"],\n )", "def upload(self, name, filepath):\n upload_id = self._client.push(filepath)\n\n endpoint = '/v1/charm/{}/revisions'.format(name)\n response = self._client.post(endpoint, {'upload-id': upload_id})\n status_url = response['status-url']\n logger.debug(\"Upload %s started, got status url %s\", upload_id, status_url)\n\n while True:\n response = self._client.get(status_url)\n 
logger.debug(\"Status checked: %s\", response)\n\n # as we're asking for a single upload_id, the response will always have only one item\n (revision,) = response['revisions']\n status = revision['status']\n\n if status in UPLOAD_ENDING_STATUSES:\n return Uploaded(\n ok=UPLOAD_ENDING_STATUSES[status],\n status=status, revision=revision['revision'])\n\n # XXX Facundo 2020-06-30: Implement a slight backoff algorithm and fallout after\n # N attempts (which should be big, as per snapcraft experience). Issue: #79.\n time.sleep(POLL_DELAY)", "def save_sample(request, sample, file_type):\n \n # Save the sample in the files directory.\n sample.save(_samples_folder)\n \n # Add a record for the file in the database.\n File(file_name=sample.identifier, file_type=file_type).save()\n \n # Return a valid response with the sample's ID.\n response_content = {'Valid': True, 'ID': sample.identifier}\n return HttpResponse(json.dumps(response_content))", "def upload():\n storeapps = APP.config[\"storage\"]\n binary = request.data\n\n # Add compatibility with POST requests\n if 'file' in request.files:\n binary = request.files['file'].read()\n\n logging.debug(\"Received file with size: %i\", len(binary))\n\n try:\n app = nativeapps.application.from_binary(binary)\n filepath = app.write(storeapps)\n return \"written: \" + filepath, 201 # 201 CREATED\n except nativeapps.application.InvalidApplicationError as exception:\n return exception, 400", "def upload_samples(self, file):\n result = self._upload_sample(file)\n if \"samples\" in result:\n return result[\"samples\"]\n else:\n return [result]", "def test_write_source(self):\n req = Request()\n for name in sample_data.keys():\n orig_fn = self._filepath(name)\n temp_fn = self._filepath(name + '-write-source')\n\n # Read the message\n resp = req.get(fromfile=orig_fn)\n\n # Write to a temporary JSON file\n resp.write_source(temp_fn)\n\n # Read the two files and compare JSON (ignores ordering)\n with open(orig_fn) as orig, open(temp_fn) as temp:\n assert json.load(orig) == json.load(temp)\n\n # Delete the temporary file\n os.remove(temp_fn)" ]
[ "0.6297403", "0.6237045", "0.58038926", "0.5784324", "0.56973356", "0.562496", "0.56072026", "0.55800873", "0.55693907", "0.5512037", "0.547841", "0.5458376", "0.54101336", "0.54032856", "0.53795666", "0.53617495", "0.5348523", "0.5347984", "0.53423697", "0.5332347", "0.53312343", "0.5324884", "0.53240216", "0.5298022", "0.5294373", "0.5288605", "0.5287585", "0.52812254", "0.5276495", "0.52763265" ]
0.681721
0
Instantiate PSQL db connection and return db cursor with given query result
def db_connection(query): db = psycopg2.connect(database=DBNAME) c = db.cursor() c.execute(query) return c.fetchall() db.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def psql_connection(query):\n try:\n conn = psycopg2.connect(database=DB)\n cur = conn.cursor()\n cur.execute(query)\n except Exception:\n (\"Error connecting to database\")\n else:\n print(\"Calling database...\")\n print(\"\")\n results = cur.fetchall()\n conn.close()\n return results", "def oracle_cursor(query):\n conf_file_dir = os.path.dirname(os.path.realpath(__file__))\n conf_file = conf_file_dir + os.sep + '.setup.conf'\n (username, password, host, port, sid) = credential_setup(conf_file)\n dsn_tns = cx_Oracle.makedsn(host, port, sid)\n connection = cx_Oracle.connect(username, password, dsn_tns)\n cursor = connection.cursor()\n cursor.execute(query)\n\n return cursor", "def connect(sql, query, conn):\n cursor = conn.cursor() # Return the cursor and use it to perform queries.\n\n # Execute the query.\n if query == 'no_query': # If we're just running an SQL query without a variable, only execute the sql\n cursor.execute(sql)\n else:\n cursor.execute(sql, query) # If we have a variable (e.g. an FBgn) be sure to include it in the execute command.\n \n records = cursor.fetchall() # Grab the results.\n cursor.close() # Close the cursor.\n return records # Return a list of tuples.", "def run_query(query):\n conn = connection.get_db_connection()\n cursor = conn.cursor()\n cursor.execute(query)\n return cursor", "def query_to_cur(dbh, query, verbose=verbose):\n if verbose : \n print query\n cur = dbh.cursor()\n cur.execute(query)\n\n return cur", "def conn_curs():\n dbname = \"abcdefgh\"\n password = \"Acka-1jfue4-snYmkall\"\n host = \"db.elephantsql.com\"\n\n connection = psycopg2.connect(dbname=dbname, user=dbname,\n password=password, host=host)\n cursor = connection.cursor()\n return connection, cursor", "def query_to_cur(dbh, qry, args):\n if args.debug:\n print(datetime.datetime.strftime(datetime.datetime.now(), \"%D %H:%m:%S\"), qry, file=sys.stderr)\n t0 = time.time()\n cur = dbh.cursor()\n cur.execute(qry)\n print(\"query took\", time.time() - t0, \"seconds\")\n return cur", "def cursor():\n dbh = handle()\n return dbh.cursor()", "def db_create_cursor(self, database_name):\n\n cursor = self.connections[database_name].cursor()\n return cursor", "def connect_to_db(query=None):\n conn = None\n cursor = None\n DB_URL = app.config[\"DATABASE_URI\"]\n try:\n # connect to db\n conn = psycopg2.connect(DB_URL)\n print(\"\\n\\nConnected {}\\n\".format(conn.get_dsn_parameters()))\n cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n if query:\n # Execute query\n cursor.execute(query)\n # Commit changes\n conn.commit()\n\n except(Exception,\n psycopg2.DatabaseError,\n psycopg2.ProgrammingError) as error:\n print(\"DB ERROR: {}\".format(error))\n return conn, cursor", "def connect_to_db(cls):\n conn = psycopg2.connect(os.environ['DATABASE_URL'])\n conn.autocommit = True\n cursor = conn.cursor()\n\n return cursor", "def create_cursor(self):\r\n cursor = self.connection.cursor()\r\n return cursor", "def __enter__(self) -> 'cursor':\n self.conn = cx_Oracle.connect(self.configuration)\n self.cursor = self.conn.cursor()\n return self.cursor", "def sqlconnect(host, database, user, password):\r\n # Assert valid connection\r\n conn = pg.connect(host = host, database=database, user = user, password = password)\r\n cur = conn.cursor()\r\n return cur", "def __enter__(self) -> 'DBcursor':\n self.conn = connector.connect(**self.dbconfig)\n self.cursor = self.conn.cursor()\n return self.cursor", "def conn(self):\n self.cnx = psycopg2.connect(**self.dbConfig) \n self.cur = 
self.cnx.cursor()", "def get_database_cursor(conn=None):\n\n if not conn:\n conn = get_database_connection()\n\n return conn.cursor()", "def db_execute_query(db_connection, query, query_args):\n cursor = db_connection.cursor()\n #datalab_logger_connections.info(\"reading database[Query. May Take Time]...\")\n cursor.execute(query, query_args)\n #datalab_logger_connections.info(\"finish to query database\")\n return cursor", "def __enter__(self) -> 'cursor':\n self.conn = pymysql.connect(self.configuration)\n #self.conn = pyodbc.connect(self.configuration)\n self.cursor = self.conn.cursor()\n return self.cursor", "def __cursor(cls):\n print('|-- Richiesta cursore da:'+str(cls.__dbCon))\n return cls.__dbCon.cursor( cursor_factory = psycopg2.extras.DictCursor )", "def connect_database(db_name):\n global CON\n if not CON:\n CON = psycopg2.connect(\"dbname ='%s' user='%s' host=/tmp/\" % (db_name, USER))\n cur = CON.cursor()\n return CON, cur", "def execute(cursor, query):\n while True:\n try:\n cursor.execute(query)\n break\n except Exception as e:\n print(\"Database query: {} {}\".format(cursor, query))\n print(\"Database retry reason: {}\".format(e))\n return cursor", "def database_connection():\n conn = psycopg2.connect(\n host=server,\n database=database,\n user=postgres_username,\n password=postgres_password,\n port=port)\n cursor = conn.cursor()\n return conn, cursor", "def connect_db(query=None, DB_URL=None):\n conn = None\n cursor = None\n if DB_URL is None:\n DB_URL = os.getenv('DATABASE_URL') \n print(DB_URL)\n\n try:\n # connect to db\n conn = psycopg2.connect(DB_URL)\n print(\"\\n\\nConnected {}\\n\".format(conn.get_dsn_parameters()))\n cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n if query:\n \n cursor.execute(query)\n conn.commit()\n\n except(Exception,\n psycopg2.DatabaseError,\n psycopg2.ProgrammingError) as error:\n print(\"DB ERROR: {}\".format(error))\n\n return conn, cursor", "def execute(cls, sql):\n cursor = cls.get_conn().cursor()\n cursor.execute(sql)\n return cursor", "def query_db(query):\n try:\n db = psycopg2.connect(database=DATABASE_NAME)\n c = db.cursor()\n c.execute(query)\n a = c.fetchall()\n db.close()\n return a\n except Exception:\n return None", "def get_cursor():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect_db()\n return db.cursor()", "def connect(db_name=\"tournament\"):\n try:\n conn = psycopg2.connect(\"dbname={}\".format(db_name))\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n return conn, cur\n except:\n print(\"Error connecting to the \" + db_name + \" database.\")", "def _get_cursor(self):\n conn = self._connect()\n conn.autocommit = True\n cursor = conn.cursor()\n return cursor", "def db_query(query, db_name, user):\n\n # Attempt connection to DB with given parameters\n try:\n conn = psycopg2.connect(dbname=db_name, user=user)\n except psycopg2.Error as e:\n print(e)\n raise SystemExit\n\n # Create cursor and try to execute given query\n # and return fetched data\n cur = conn.cursor()\n try:\n cur.execute(query)\n except psycopg2.Error as e:\n print(e)\n conn.close()\n else:\n return cur.fetchall()\n cur.close()\n conn.close()" ]
[ "0.7395502", "0.69118047", "0.6840957", "0.68388003", "0.6829422", "0.68154687", "0.6758602", "0.6728761", "0.6728179", "0.6697794", "0.6604093", "0.6601227", "0.6562602", "0.6542697", "0.6531216", "0.6530874", "0.6519317", "0.6513908", "0.6486032", "0.6468667", "0.6456323", "0.6450232", "0.64442265", "0.6434994", "0.6426576", "0.6425318", "0.6425063", "0.64242405", "0.64218205", "0.6416528" ]
0.7045057
1
Test all API urls
def test_urls(self): self.base_check_request("get", "/") self.base_check_request("get", "apartments/") self.base_check_request("get", "complexes/") self.base_check_request("get", "locations/") self.base_check_request("get", "companies/") self.base_check_request("get", "companies-types/") self.base_check_request("get", "count/apartments/") self.base_check_request("get", "count/complexes/") self.base_check_request("get", "search-forms/apartments/") self.base_check_request("get", "search-forms/complexes/") self.base_check_request("get", "search-forms/main/") self.base_check_request("get", "autocomplete/companies/") self.base_check_request("get", "autocomplete/complexes/") self.base_check_request("get", "autocomplete/locations/") self.base_check_request("get", "apartments_for_maps/?count=1&fields=lat,lon") # self.base_check_request("get", "reserve/") # self.base_check_request("get", "complain/") # self.base_check_request("post", "apartment-complain/") # self.base_check_request("post", "order-apartment/")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_smoke_test(self):\n urls = [ ]\n urls.append('/')\n urls.append(reverse('api_doc'))\n urls.append(reverse('laws'))\n urls.append(reverse('issue_list_user', args=['test0']))\n\n for url in urls:\n response = self.client.get(url)\n self.assertEqual(response.status_code , 200)", "def test_get_api_resources(self):\n pass", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def test_urls(self):\n for endpoint in [FONTS_BUNDLE_URL, JS_BUNDLE_URL]:\n try:\n res = requests.get(endpoint, timeout=10)\n self.assertEqual(res.status_code, 200)\n except requests.exceptions.ReadTimeout:\n # Don't fail on a timeout, sometimes unpkg can be slow\n pass", "def test_urls_work(url):\n with requests.get(url) as r:\n assert r.status_code == 200", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def testApi(self):", "def test_url_construction(self):\n\n a = api.InvenTreeAPI(\"http://localhost:1234\", connect=False)\n\n tests = {\n 'part': 'http://localhost:1234/api/part/',\n '/part': 'http://localhost:1234/api/part/',\n '/part/': 'http://localhost:1234/api/part/',\n 'order/so/shipment': 'http://localhost:1234/api/order/so/shipment/',\n }\n\n for endpoint, url in tests.items():\n self.assertEqual(a.constructApiUrl(endpoint), url)", "def test_all_http_stats(self):\n client = Client()\n response = client.get(reverse('home'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('browse_produce'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('browse_locations'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('search'))\n self.assertEqual(200, response.status_code)\n response = client.get(reverse('faq'))\n self.assertEqual(200, response.status_code)", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def test_base_url(self):\n\n # Each of these URLs should be invalid\n for url in [\n \"test.com/123\",\n \"http://:80/123\",\n \"//xyz.co.uk\",\n ]:\n with self.assertRaises(Exception):\n a = api.InvenTreeAPI(url, connect=False)\n\n # test for base URL construction\n a = api.InvenTreeAPI('https://test.com', connect=False)\n self.assertEqual(a.base_url, 'https://test.com/')\n self.assertEqual(a.api_url, 'https://test.com/api/')\n\n # more tests that the base URL is set correctly under specific conditions\n urls = [\n \"http://a.b.co:80/sub/dir/api/\",\n \"http://a.b.co:80/sub/dir/api\",\n \"http://a.b.co:80/sub/dir/\",\n \"http://a.b.co:80/sub/dir\",\n ]\n\n for url in urls:\n a = api.InvenTreeAPI(url, connect=False)\n self.assertEqual(a.base_url, \"http://a.b.co:80/sub/dir/\")\n self.assertEqual(a.api_url, \"http://a.b.co:80/sub/dir/api/\")", "def api_test(count):\n # Log all API requests\n exception = log_api()\n if exception:\n return jsonify({'error': exception}), HTTPStatus.INTERNAL_SERVER_ERROR\n\n # Per the spec, path segments are used across all requests within 
a test\n path_segments = generate_path_segments()\n for i in range(0, count):\n # Randomly determine the number of segments in this request\n path_count = random.randrange(1, 7)\n\n # WARNING\n # host.docker.internal is NOT production safe.\n # The production domain should be taken from settings\n # or the environment.\n url = 'http://host.docker.internal:5000/api'\n\n while path_count > 0:\n url += '/{}'.format(path_segments[random.randrange(0, 3)])\n path_count -= 1\n url += '/'\n\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return jsonify({'error': 'request error'}), \\\n HTTPStatus.INTERNAL_SERVER_ERROR\n return '', HTTPStatus.OK", "def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def test_multiple_gets(uris):\n\n for uri in uris:\n print('='*10 + ' Try uri : {uri} '.format(uri=uri) + '='*10)\n resp = get_api_url(uri)\n print(resp)\n try:\n pprint(resp.json())\n except Exception as e:\n print(resp.text)", "def test_officer_access(self):\n self.client.login(self.officer.email)\n for url in self.urls_get:\n response = self.client.get(url, follow=False)\n self.assertEqual(200, response.status_code)\n for url in self.urls_post:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEquals(200, response.status_code)", "def test_base_url(self):\n r = self.base_check_request(\"get\", \"/\")\n\n base_urls = {\n 'apartments': self.build_url('apartments/'),\n 'companies': self.build_url('companies/'),\n 'companies-types': self.build_url('companies-types/'),\n 'complexes': self.build_url('complexes/'),\n 'locations': self.build_url('locations/')\n }\n self.assertDictEqual(r, base_urls)", "def test_list_endpoints(self):\n routes = [\n '/',\n '/npm/<name>',\n '/nuget/<name>',\n '/ping',\n '/ping/npm',\n '/ping/nuget',\n '/ping/pypi',\n '/ping/rubygems',\n '/pypi/<name>',\n '/rubygems/<name>',\n ]\n expected = {}\n for num, route in enumerate(routes):\n expected[str(num)] = route\n\n response = self.app.get('/')\n assert json.loads(response.data) == expected", "def test_api_lookup(self):\n\n # Set up the url for the api call\n\n expected_url = 'https://www.gov.uk/api/content{}'.format(self.urlsclass.dedupurls[0])\n\n # Make request and extract json.\n\n expected = requests.get(expected_url).json()\n\n assert api_lookup(self.urlsclass.dedupurls[0], 'https://www.gov.uk/api/content') == expected", "def test_apis_wo_auth(self):\n\n # Order list API\n url = reverse('orders-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order summary API\n url = reverse('order-summary-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order create API\n url = reverse('orders-list')\n response = self.client.post(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Shares list/summary API\n url = 
reverse('shares-list', args=['summary'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n url = reverse('shares-list', args=['all'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200", "def test_urls():\n assert len(urlpatterns) > 0", "def test_good_get_url(self):\n result = self._search('Love Story', just_results=True)\n get_url = result[0]['get_url']\n resp = self.app.get(get_url)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('url', resp.data)\n self.assertIn('/d?', resp.data)", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_urls(self):\n base_test_url = 'http://{}:{}/'.format(TESTING_CONFIG['host'],\n TESTING_CONFIG['port'])\n self.conn._host_url == base_test_url\n self.conn.aheader_url == base_test_url + 'analysis_header'\n self.conn.atail_url == base_test_url + 'analysis_tail'\n self.conn.dref_url == base_test_url + 'data_reference'\n self.conn.dref_header_url == base_test_url + 'data_reference_header'", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')", "def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)", "def test_url_path(self):\n response = self.client.get('/planner/recipes/')\n self.assertEqual(response.status_code, 200)" ]
[ "0.7839706", "0.7648727", "0.745794", "0.7436893", "0.731011", "0.71979964", "0.71921796", "0.7125115", "0.70737904", "0.7020486", "0.7014052", "0.6991493", "0.6971624", "0.6966466", "0.68834203", "0.6853621", "0.68137467", "0.68028086", "0.6798513", "0.6750252", "0.67475736", "0.6702363", "0.6687126", "0.6683521", "0.6644318", "0.6631584", "0.6631584", "0.6630637", "0.6590667", "0.6588608" ]
0.78406894
0
Test base API url
def test_base_url(self): r = self.base_check_request("get", "/") base_urls = { 'apartments': self.build_url('apartments/'), 'companies': self.build_url('companies/'), 'companies-types': self.build_url('companies-types/'), 'complexes': self.build_url('complexes/'), 'locations': self.build_url('locations/') } self.assertDictEqual(r, base_urls)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_base_url(self):\n\n # Each of these URLs should be invalid\n for url in [\n \"test.com/123\",\n \"http://:80/123\",\n \"//xyz.co.uk\",\n ]:\n with self.assertRaises(Exception):\n a = api.InvenTreeAPI(url, connect=False)\n\n # test for base URL construction\n a = api.InvenTreeAPI('https://test.com', connect=False)\n self.assertEqual(a.base_url, 'https://test.com/')\n self.assertEqual(a.api_url, 'https://test.com/api/')\n\n # more tests that the base URL is set correctly under specific conditions\n urls = [\n \"http://a.b.co:80/sub/dir/api/\",\n \"http://a.b.co:80/sub/dir/api\",\n \"http://a.b.co:80/sub/dir/\",\n \"http://a.b.co:80/sub/dir\",\n ]\n\n for url in urls:\n a = api.InvenTreeAPI(url, connect=False)\n self.assertEqual(a.base_url, \"http://a.b.co:80/sub/dir/\")\n self.assertEqual(a.api_url, \"http://a.b.co:80/sub/dir/api/\")", "def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)", "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def test_url_construction(self):\n\n a = api.InvenTreeAPI(\"http://localhost:1234\", connect=False)\n\n tests = {\n 'part': 'http://localhost:1234/api/part/',\n '/part': 'http://localhost:1234/api/part/',\n '/part/': 'http://localhost:1234/api/part/',\n 'order/so/shipment': 'http://localhost:1234/api/order/so/shipment/',\n }\n\n for endpoint, url in tests.items():\n self.assertEqual(a.constructApiUrl(endpoint), url)", "def test_api_object_base_url(self, api_object, server_address):\n expected_base_url = 'https://{}/api/domain/'.format(server_address)\n assert api_object.base_url == expected_base_url", "def test_get_base_url():\n eq_(get_base_url(\"http://foo.com/bar/baz\"), \"http://foo.com\")\n eq_(get_base_url(\"https://foo.com:443/foo/bar\"), \"https://foo.com:443\")", "def base_url():\n return json.loads('{\"message\": \"Try with /data\", \"success\": false}')", "def testApi(self):", "def test_get_api_resources(self):\n pass", "def base_url(self):\n return \"https://api.byte-stack.net\" if self.use_sandbox \\\n else \"https://api.ovo.id\"", "def test_api_lookup(self):\n\n # Set up the url for the api call\n\n expected_url = 'https://www.gov.uk/api/content{}'.format(self.urlsclass.dedupurls[0])\n\n # Make request and extract json.\n\n expected = requests.get(expected_url).json()\n\n assert api_lookup(self.urlsclass.dedupurls[0], 'https://www.gov.uk/api/content') == expected", "def test_url_path(self):\n response = self.client.get('/planner/recipes/1/')\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def get_api_url():\n return \"https://api.basespace.illumina.com/v1pre3\"", "def test_api_urls():\n # Test the status message - 404 not good , 200 good\n assert API_RH.create_req().status_code == 200, \"The tests for URLs were successful\"", "def test_url_path(self):\n response = self.client.get('/planner/recipes/')\n self.assertEqual(response.status_code, 200)", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = 
requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def base_request(url_path):\n response = requests.get(settings.URL_API + url_path)\n if response.status_code != 200:\n return response\n else:\n return response.json()", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def __init__(self, base_url):\n self.base_url = '/'.join([base_url, str(self.API_VERSION)])", "def __init__(self, base_url):\n self.base_url = '/'.join([base_url, str(self.API_VERSION)])", "def __init__(self, base_url):\n self.base_url = '/'.join([base_url, str(self.API_VERSION)])", "def test_good_get_url(self):\n result = self._search('Love Story', just_results=True)\n get_url = result[0]['get_url']\n resp = self.app.get(get_url)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('url', resp.data)\n self.assertIn('/d?', resp.data)", "def api_base_url(self):\n\n\t\treturn self._api_base_url", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def test_easily_reusable(self):\n result = get_api_url()\n\n self.assertEqual(result, 'https://FQDN/afp-api/latest')\n self.mock_sanitize_host.assert_called_once_with('afp')", "def test(base_url='http://localhost:8000/'):\n with env.cd(settings.PROJECT_PATH):\n # env.run('python rnacentral/apiv1/tests.py --base_url=%s' % base_url)\n env.run('python rnacentral/portal/tests/selenium_tests.py --base_url %s --driver=phantomjs' % base_url) # pylint: disable=C0301\n env.run('python rnacentral/apiv1/search/sequence/tests.py --base_url %s' % base_url) # pylint: disable=C0301", "def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200", "def test_get_url_base_returns_url_base(self):\n # Arrange / Act\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).get_url_base()\n # Assert\n self.assertEqual(return_value, SERVER_URI)" ]
[ "0.7750673", "0.77456576", "0.7652821", "0.74328506", "0.716898", "0.71184355", "0.7075185", "0.7028354", "0.7003112", "0.69900703", "0.6973003", "0.6941978", "0.6894864", "0.6894864", "0.6892169", "0.6889323", "0.6861361", "0.6855039", "0.68360007", "0.6826019", "0.68183166", "0.68183166", "0.68183166", "0.68131644", "0.6810355", "0.67955554", "0.6779047", "0.67670965", "0.67611796", "0.6759537" ]
0.7842747
0
Check count apartments url
def test_count_apartments_urls(self): r = self.base_check_request("get", "count/apartments/") self.assertIsInstance(r, dict) self.assertIsInstance(r['count'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countdots(url): \r\n return url.count('.')", "def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)", "def CountAt(url):\r\n return url.count('@')", "def test_count_complexes_urls(self):\n r = self.base_check_request(\"get\", \"count/complexes/\")\n self.assertIsInstance(r, dict)\n self.assertIsInstance(r['count'], int)", "def valid(url):\n return 0 < len(urlparse(url)[1])", "def degruyterCheckSite(url):\n dgtestPhrase = 'Licensed Access'\n dgtestPhrase2 = 'viewbooktoc'\n\n # urltoCheck = input(\"\\n what is the URL? \\n\")\n\n urltoCheck = url\n\n r = requests.get(urltoCheck)\n rResult = r.text\n\n dgoutcome = 0\n if (dgtestPhrase in rResult) and (dgtestPhrase2 in rResult):\n dgoutcome = 1\n\n return dgoutcome", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def countSubDir(url):\r\n return url.count('/')", "def CountSoftHyphen(url):\r\n return url.count('-')", "def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])", "def validate_url(self):\n pass", "def find_link(self):\n links = self.driver.find_elements_by_tag_name(\"a\")\n count = 0\n for link in links:\n link = str(link.get_attribute(\"href\"))\n if self.link == ('/' + link.split('/')[-1]):\n count += 1\n return count", "def test_computed_url(self):\n t = self.create_request_object()\n self.assertEqual(\"studies\", t.url_path())", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def _get_apt_urls_per_page(self, soup):\n\n # identify the tag that contains apt URL\n apartments = soup.find_all('div', class_='listing-item__tab-content')\n apt_urls = [apt.find('a')['href'] for apt in apartments]\n # formulate a complete apartment URL\n apt_urls = [f'{CONST.ELLIMAN_HEADER}{url}' for url in apt_urls]\n return apt_urls", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! 
status_code: \" + r\n return(False)", "def check_url_format(self):\n\n m = re.match(r\"^http://www\\.flipkart\\.com/.*/p/.*$\", self.product_page_url)\n\n return not not m", "def asinGeturl(url):\n asin = url.split('/')\n for i in asin:\n asinNum = i.strip()\n if len(asinNum) != 10:\n continue\n else:\n asinN = asinNum\n\n return asinN", "def item_url(url):\n return all(map(lambda x: str.isdigit(x), str(url.strip('/').split('/')[-1])))", "def check_url(self):\n\n base = 'https://www.reformagkh.ru/myhouse/profile/view/'\n\n if base not in self.url:\n raise UrlError('It is not an www.reformagkh.ru link. '\n 'Please try the correct link.')", "def check_url(url):\n return 'products.json' in url", "def test_govukurls_deduplication(self):\n\n assert len(self.urlsclass.dedupurls) < len(self.urls)\n assert len(self.urlsclass.dedupurls) == len(self.urls) / 2", "def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)", "def check_urls(quartus_versions):\n success = True\n for quartus in quartus_versions.keys():\n parts = quartus_versions[quartus]\n parts_str = [str(k) for k in parts.keys()]\n #print(\"Checking Quartus %s, available parts (%s)\\n\" % (quartus, \",\".join(parts_str)))\n for part in parts:\n result = test_url(quartus, part, parts[part])\n if not result:\n print(\"\\nMissing %s/%s url=%s\" % (quartus, part, parts[part]))\n success = False\n return success", "def test_increment_view_count(self):\n shortUrl = 'increment_url'\n url = 'http://www.google.com'\n author = 'author'\n self.urlShortener.saveUrl(shortUrl, url, author)\n\n self.urlShortener.increment_visited_count(shortUrl)\n self.urlShortener.increment_visited_count(shortUrl)\n\n doc = self.urlShortener.get_doc_from_shorturl(shortUrl)\n self.assertEqual(int(doc['clicks']), 2)\n\n self.urlShortener.removeUrl(shortUrl)", "def test_computed_url(self):\n t = TwoHundredRequest()\n self.assertEqual(\"twohundred\", t.url_path())", "def long_url(l):\r\n l= str(l)\r\n if len(l) < 53:\r\n return 0\r\n elif len(l)>=53 and len(l)<75:\r\n return 2\r\n else:\r\n return 1", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def match_url(self, url):\n pass" ]
[ "0.6570135", "0.62185955", "0.61818105", "0.60949767", "0.5965347", "0.59338355", "0.589852", "0.58700925", "0.5813304", "0.57668436", "0.57417595", "0.56799453", "0.5673117", "0.5641667", "0.5640872", "0.5614625", "0.56092155", "0.558392", "0.554869", "0.5544998", "0.5539954", "0.5539812", "0.55249125", "0.5516439", "0.5503539", "0.54859203", "0.5454766", "0.5450949", "0.54376584", "0.54321706" ]
0.6989395
0
Check count complexes url
def test_count_complexes_urls(self):
    r = self.base_check_request("get", "count/complexes/")
    self.assertIsInstance(r, dict)
    self.assertIsInstance(r['count'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countdots(url): \r\n return url.count('.')", "def countSubDir(url):\r\n return url.count('/')", "def CountAt(url):\r\n return url.count('@')", "def test_count_apartments_urls(self):\n r = self.base_check_request(\"get\", \"count/apartments/\")\n self.assertIsInstance(r, dict)\n self.assertIsInstance(r['count'], int)", "def CountDSlash(url):\r\n return url.count('//')", "def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])", "def valid(url):\n return 0 < len(urlparse(url)[1])", "def test_increment_view_count(self):\n shortUrl = 'increment_url'\n url = 'http://www.google.com'\n author = 'author'\n self.urlShortener.saveUrl(shortUrl, url, author)\n\n self.urlShortener.increment_visited_count(shortUrl)\n self.urlShortener.increment_visited_count(shortUrl)\n\n doc = self.urlShortener.get_doc_from_shorturl(shortUrl)\n self.assertEqual(int(doc['clicks']), 2)\n\n self.urlShortener.removeUrl(shortUrl)", "def CountSoftHyphen(url):\r\n return url.count('-')", "def count_words_at_url(url):\n resp = requests.get(url)\n print(len(resp.text.split()))\n return len(resp.text.split())", "def countQueries(query):\r\n if not query:\r\n return 0\r\n else:\r\n return len(query.split('&'))", "def find_link(self):\n links = self.driver.find_elements_by_tag_name(\"a\")\n count = 0\n for link in links:\n link = str(link.get_attribute(\"href\"))\n if self.link == ('/' + link.split('/')[-1]):\n count += 1\n return count", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def __countMatches(self, http_resource):\n matches = 0\n for b in self.browsed:\n if (http_resource.path == b.path and http_resource.method == b.method == \"GET\"):\n qs = http_resource.encoded_params\n u = b.encoded_params\n if http_resource.encoded_get_keys == b.encoded_get_keys:\n # key and value in the query string\n if \"=\" in qs:\n i = 0\n for __ in xrange(0, qs.count(\"=\")):\n start = qs.find(\"=\", i)\n i = qs.find(\"&\", start)\n if i != -1:\n if u.startswith(qs[:start] + \"=\") and u.endswith(qs[i:]):\n matches += 1\n else:\n if u.startswith(qs[:start] + \"=\"):\n matches += 1\n else:\n # only a key name is query string (eg: path?key_name)\n if \"&\" not in qs and \"&\" not in u:\n matches += 1\n return matches", "def test_uri_len(self):\n client = self.base_scenario(\n frang_config=\"http_uri_len 5;\",\n requests=[\n [\n (\":authority\", \"example.com\"),\n (\":path\", \"/123456789\"),\n (\":scheme\", \"https\"),\n (\":method\", 
\"POST\"),\n ]\n ],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"frang: HTTP URI length exceeded for\"\n )", "def test_parsed_hsps(self):\n n_hsps = 0\n for query_id, hsps in self.result.iter_hits_by_query():\n n_hsps += len(hsps)\n self.assertEqual(n_hsps, 3)", "def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)", "def check_url(url):\n return 'products.json' in url", "def count_request_contains_str(sting_input):\n request_list = var_cache['local'].get_request_list()\n match_count = 0\n for url in request_list:\n if url.find(sting_input) > -1:\n match_count += 1\n return match_count", "def count_words_at_url(url):\n return len(urlopen(url).read().split())", "def IsValidURL(s):\n return RE_COMPLEX_URL.match(s)", "def testLongURL(self):\n self.assertEqual([], grab('www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www', self.needScheme))", "def how_many_entries(URL):\n\treturn len(get_lottery_numbers(URL))", "def test_computed_url(self):\n t = self.create_request_object()\n self.assertEqual(\"studies\", t.url_path())", "def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count", "def test_uri_len(self):\n client = self.base_scenario(\n frang_config=\"http_uri_len 5;\",\n requests=[\"POST /123456789 HTTP/1.1\\r\\nHost: localhost\\r\\n\\r\\n\"],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"frang: HTTP URI length exceeded for\"\n )", "def validate_url(self):\n pass", "def _check_grib(self, url):\n head = requests.head(url)\n check_exists = head.ok\n if check_exists:\n check_content = int(head.raw.info()['Content-Length']) > 1_000_000\n return check_exists and check_content\n else:\n return False", "def check_url_and_raise_errors(url: str) -> None:\n if not url:\n raise_error(\"Url can not be empty\", 400)\n\n try:\n URL_REGEX.match(url).span()[1] - URL_REGEX.match(url).span()[0] == len(url)\n except AttributeError:\n raise_error(\"Url should be valid\", 400)", "def count(self):\n return len(self.__links)" ]
[ "0.6884328", "0.66283315", "0.65081114", "0.638867", "0.6249238", "0.62182957", "0.6169778", "0.611569", "0.6075261", "0.58923984", "0.58567244", "0.5837225", "0.5818708", "0.57962257", "0.5749233", "0.57347083", "0.57246214", "0.5721146", "0.5709762", "0.56962866", "0.56862783", "0.5685042", "0.56845", "0.5669808", "0.5652285", "0.5627249", "0.5621357", "0.5613742", "0.5612393", "0.56005925" ]
0.8033261
0
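The records above follow the (query, document, negatives) triplet layout declared in each row's metadata. As a minimal illustrative sketch (the field names come from the columns shown here; the helper function, its parameters, and the toy example row are assumptions, not part of this dataset), one such row could be unpacked into training triplets roughly like this:

# Sketch only: turn one row of this schema into (query, positive, negative) triplets.
def row_to_triplets(row, max_negatives=5):
    """Yield (query, positive, negative) tuples from a row-shaped dict.

    `row` is assumed to be a plain dict with the columns shown above;
    `max_negatives` is an assumed knob, not a dataset field, capping how
    many of the mined negatives are paired with each query.
    """
    query = row["query"]
    positive = row["document"]
    for negative in row["negatives"][:max_negatives]:
        yield (query, positive, negative)

# Example usage with a toy row shaped like the records in this file:
example_row = {
    "query": "Check count complexes url",
    "document": "def test_count_complexes_urls(self): ...",
    "negatives": ["def countdots(url): ...", "def CountAt(url): ..."],
}
for triplet in row_to_triplets(example_row):
    print(triplet)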
Check apartments search form url
def test_search_form_apartments_urls(self):
    r_keys = ['balcony_types', 'bathroom_type', 'building_floors_max', 'building_floors_min',
              'building_type', 'decoration', 'elevators_type', 'floor_max', 'floor_min',
              'infrastructure', 'living_area_max', 'living_area_min', 'metro_stations',
              'price_per_m_max', 'price_per_m_min', 'regions', 'rooms_count',
              'total_area_max', 'total_area_min']
    r = self.check_request_keys("get", "search-forms/apartments/", r_keys)
    self.check_list_item_keys(r["balcony_types"], ['id', 'name'])
    self.check_list_item_keys(r["bathroom_type"], ['id', 'name'])
    self.assertIsInstance(r['building_floors_max'], int)
    self.assertIsInstance(r['building_floors_min'], int)
    self.check_list_item_keys(r["building_type"], ['id', 'name'])
    self.assertIsInstance(r['decoration'], list)
    self.assertEqual(r['decoration'], [])
    self.check_list_item_keys(r["elevators_type"], ['id', 'name'])
    self.assertIsInstance(r['floor_max'], int)
    self.assertIsInstance(r['floor_min'], int)
    self.assertIsInstance(r['infrastructure'], list)
    self.assertEqual(r['infrastructure'], [])
    self.assertIsInstance(r['living_area_max'], int)
    self.assertIsInstance(r['living_area_min'], int)
    self.check_list_item_keys(r["metro_stations"], ['id', 'name'])
    self.assertIsInstance(r['price_per_m_max'], int)
    self.assertIsInstance(r['price_per_m_min'], int)
    self.check_list_item_keys(r["regions"], ['format', 'id', 'locations', 'name', 'slug',
                                             'typeBeforeLocation', 'typeName',
                                             'typePrepositionalShortName', 'typeShortName'])
    self.check_list_items_type(r['rooms_count'], int)
    self.assertIsInstance(r['total_area_max'], int)
    self.assertIsInstance(r['total_area_min'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n results.append(title)\n if results:\n substring = True\n # return results\n return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })", "def query(url):", "def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? 
Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)", "def hyperlink_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif 'UniProtKB Accession' in request.GET and request.GET['UniProtKB Accession'] or \\\n\t'Protein' in request.GET and request.GET['Protein'] or \\\n\t'Gene' in request.GET and request.GET['Gene'] or \\\n\t'Organism' in request.GET and request.GET['Organism'] or \\\n\t'Organismid' in request.GET and request.GET['Organismid'] or \\\n\t'SubCellular' in request.GET and request.GET['SubCellular'] or \\\n\t'Peptide Sequence' in request.GET and request.GET['Peptide Sequence'] or \\\n\t'Pathway Name' in request.GET and request.GET['Pathway Name'] or \\\n\t'Disease Name' in request.GET and request.GET['Disease Name'] or \\\n\t'Go ID' in request.GET and request.GET['Go ID'] or \\\n\t'Go Name' in request.GET and request.GET['Go Name'] or \\\n\t'Go Term' in request.GET and request.GET['Go Term'] or \\\n\t'AssayFdaApproveMark' in request.GET and request.GET['AssayFdaApproveMark']:\n\t\tuseruniprotkb =\"\"\n\t\tuserprotein =\"\"\n\t\tusergeneid =\"\"\n\t\tuserorg=\"\"\n\t\tuserorgid=\"\"\n\t\tusersubcell =\"\"\n\t\tuserpepseq =\"\"\n\t\tuserpathway =\"\"\n\t\tuserdis =\"\"\n\t\tusergoid =\"\"\n\t\tusergotn =\"\"\n\t\tusergot=\"\"\n\t\tuserassayfdaapprovemark=\"\"\n\t\tfinalsearhdata=''\n\t\ttry:\n\t\t\tuseruniprotkb = request.GET[\"UniProtKB Accession\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in useruniprotkb:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('|')\n\t\telse:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('\\\\n')\n\t\tuseruniprotkb=[(item.strip()).lower() for item in useruniprotkb]\n\t\tuseruniprotkb=map(str, useruniprotkb)\n\t\tuseruniprotkb=filter(None, useruniprotkb)\n\n\t\ttry:\n\t\t\tuserprotein = request.GET[\"Protein\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userprotein:\n\t\t\tuserprotein=(userprotein.strip()).split('|')\n\t\telse:\n\t\t\tuserprotein=(userprotein.strip()).split('\\\\n')\n\t\tuserprotein=[(item.strip()).lower() for item in userprotein]\n\t\tuserprotein=map(str, userprotein)\n\t\tuserprotein=filter(None, userprotein)\n\n\t\ttry:\n\t\t\tusergeneid = request.GET[\"Gene\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergeneid:\n\t\t\tusergeneid=(usergeneid.strip()).split('|')\n\t\telse:\n\t\t\tusergeneid=(usergeneid.strip()).split('\\\\n')\n\t\tusergeneid=[(item.strip()).lower() for item in usergeneid]\n\t\tusergeneid=map(str, 
usergeneid)\n\t\tusergeneid=filter(None, usergeneid)\n\n\t\ttry:\n\t\t\tuserorg = request.GET[\"Organism\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorg:\n\t\t\tuserorg=(userorg.strip()).split('|')\n\t\telse:\n\t\t\tuserorg=(userorg.strip()).split('\\\\n')\n\t\tuserorg=[(item.strip()).lower() for item in userorg]\n\t\tuserorg=map(str, userorg)\n\t\tuserorg=filter(None, userorg)\n\n\t\ttry:\n\t\t\tuserorgid = request.GET[\"Organismid\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorgid:\n\t\t\tuserorgid=(userorgid.strip()).split('|')\n\t\telse:\n\t\t\tuserorgid=(userorgid.strip()).split('\\\\n')\n\t\tuserorgid=[(item.strip()).lower() for item in userorgid]\n\t\tuserorgid=map(str, userorgid)\n\t\tuserorgid=filter(None, userorgid)\n\n\t\ttry:\n\t\t\tusersubcell = request.GET[\"SubCellular\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usersubcell:\n\t\t\tusersubcell=(usersubcell.strip()).split('|')\n\t\telse:\n\t\t\tusersubcell=(usersubcell.strip()).split('\\\\n')\n\t\tusersubcell=[(item.strip()).lower() for item in usersubcell]\n\t\tusersubcell=map(str, usersubcell)\n\t\tusersubcell=filter(None, usersubcell)\n\n\t\ttry:\n\t\t\tuserpepseq = request.GET[\"Peptide Sequence\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpepseq:\n\t\t\tuserpepseq=(userpepseq.strip()).split('|')\n\t\telse:\n\t\t\tuserpepseq=(userpepseq.strip()).split('\\\\n')\n\t\tuserpepseq=[(item.strip()).lower() for item in userpepseq]\n\t\tuserpepseq=map(str, userpepseq)\n\t\tuserpepseq=filter(None, userpepseq)\n\n\t\ttry:\n\t\t\tuserpathway = request.GET[\"Pathway Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpathway:\n\t\t\tuserpathway=(userpathway.strip()).split('|')\n\t\telse:\n\t\t\tuserpathway=(userpathway.strip()).split('\\\\n')\n\t\tuserpathway=[(item.strip()).lower() for item in userpathway]\n\t\tuserpathway=map(str, userpathway)\n\t\tuserpathway=filter(None, userpathway)\n\n\t\ttry:\n\t\t\tuserdis = request.GET[\"Disease Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userdis:\n\t\t\tuserdis=(userdis.strip()).split('|')\n\t\telse:\n\t\t\tuserdis=(userdis.strip()).split('\\\\n')\n\t\tuserdis=[(item.strip()).lower() for item in userdis]\n\t\tuserdis=map(str, userdis)\n\t\tuserdis=filter(None, userdis)\n\n\t\ttry:\n\t\t\tusergoid = request.GET[\"Go ID\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergoid:\n\t\t\tusergoid=(usergoid.strip()).split('|')\n\t\telse:\n\t\t\tusergoid=(usergoid.strip()).split('\\\\n')\n\t\tusergoid=[(item.strip()).lower() for item in usergoid]\n\t\tusergoid=map(str, usergoid)\n\t\tusergoid=filter(None, usergoid)\n\n\t\ttry:\n\t\t\tusergotn = request.GET[\"Go Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergotn:\n\t\t\tusergotn=(usergotn.strip()).split('|')\n\t\telse:\n\t\t\tusergotn=(usergotn.strip()).split('\\\\n')\n\t\tusergotn=[(item.strip()).lower() for item in usergotn]\n\t\tusergotn=map(str, usergotn)\n\t\tusergotn=filter(None, usergotn)\n\n\t\ttry:\n\t\t\tusergot = request.GET[\"Go Term\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergot:\n\t\t\tusergot=(usergot.strip()).split('|')\n\t\telse:\n\t\t\tusergot=(usergot.strip()).split('\\\\n')\n\t\tusergot=[(item.strip()).lower() for item in usergot]\n\t\tusergot=map(str, usergot)\n\t\tusergot=filter(None, usergot)\n\n\t\ttry:\n\t\t\tuserassayfdaapprovemark = request.GET[\"AssayFdaApproveMark\"]\n\t\texcept 
MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userassayfdaapprovemark:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('|')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\telse:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('\\\\n')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\tuserassayfdaapprovemark=[(item.strip()).lower() for item in userassayfdaapprovemark]\n\t\tuserassayfdaapprovemark=map(str, userassayfdaapprovemark)\n\t\tuserassayfdaapprovemark=filter(None, userassayfdaapprovemark)\n\n\t\tspquerylist =[]\n\t\tsearchtermlist=[]\n\n\t\tif len(useruniprotkb) >0:\n\t\t\tfinalsearhdata+='UniProtKB Accession:'+';'.join(useruniprotkb)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in useruniprotkb:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"UniProtKB Accession.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userprotein)> 0:\n\t\t\tfinalsearhdata+='Protein:'+';'.join(userprotein)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userprotein:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Protein.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergeneid) >0:\n\t\t\tfinalsearhdata+='Gene:'+';'.join(usergeneid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergeneid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Gene.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorg) > 0:\n\t\t\tfinalsearhdata+='Organism:'+';'.join(userorg)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorg:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorgid) > 0:\n\t\t\tfinalsearhdata+='Organism ID:'+';'.join(userorgid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorgid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif 
len(usersubcell) >0:\n\t\t\tfinalsearhdata+='SubCellular:'+';'.join(usersubcell)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usersubcell:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"SubCellular.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpepseq) >0:\n\t\t\tfinalsearhdata+='Peptide Sequence:'+';'.join(userpepseq)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userpepseq:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpathway) >0:\n\t\t\tfinalsearhdata+='Pathway Name:'+';'.join(userpathway)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userpathway:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Pathway Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userdis) >0:\n\t\t\tfinalsearhdata+='Disease Name:'+';'.join(userdis)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userdis:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Disease Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergoid) >0:\n\t\t\tfinalsearhdata+='Go ID:'+';'.join(usergoid)+' '\n\t\t\tsdict={}\n\t\t\tsdict[\"Go ID.ngram\"]=[i.split(' ')[0] for i in usergoid]\n\t\t\ttdict={}\n\t\t\ttdict[\"terms\"]=sdict\n\t\t\tsearchtermlist.append(tdict)\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergoid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]+={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergotn) >0:\n\t\t\tfinalsearhdata+='Go Name:'+';'.join(usergotn)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergotn:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go 
Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergot) > 0:\n\t\t\tfinalsearhdata+='Go Term:'+';'.join(usergot)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergot:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Term.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(userassayfdaapprovemark) > 0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker::'+';'.join(userassayfdaapprovemark)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userassayfdaapprovemark:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(searchtermlist)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnameFIle=names.get_first_name()\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\t\t\t\t\t\n\t\t\t\t\tjfinaldata.append(jdic)\n\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\n\t\t\tif foundHits 
>0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata)\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1]))\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items())[:10])\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries']))\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dumps(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tcontextindex={\n\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t}\n\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn render(request,'resultform.html',{'foundHits':foundHits})", "def degruyterCheckSite(url):\n dgtestPhrase = 'Licensed Access'\n dgtestPhrase2 = 'viewbooktoc'\n\n # urltoCheck = input(\"\\n what is the URL? 
\\n\")\n\n urltoCheck = url\n\n r = requests.get(urltoCheck)\n rResult = r.text\n\n dgoutcome = 0\n if (dgtestPhrase in rResult) and (dgtestPhrase2 in rResult):\n dgoutcome = 1\n\n return dgoutcome", "def search(request):\n raise NotImplementedError", "def search(request):\n\n if request.method == \"POST\":\n form = SearchForm(request.POST)\n\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n entryMD = util.get_entry(title)\n\n print('search request: ', title)\n\n if entryMD:\n return redirect(reverse('entry', args=[title]))\n else:\n relatedTitles = util.relatedTitles(title)\n\n return render(request, \"encyclopedia/search.html\", {\n \"title\": title,\n \"relatedTitles\": relatedTitles,\n \"searchForm\": SearchForm()\n })\n return redirect(reverse('index'))", "def get_query_url(self, search_args):\n self._browser.open(\"http://poe.trade/\")\n # There are two forms, the second one is the search form\n # Both forms don't have names so we just know the 2nd one is the right one\n self._browser.form = list(self._browser.forms())[1]\n \n # Populate the forms with the stuff we want\n for form_name in search_args:\n control = self._browser.form.find_control(form_name)\n control.value = search_args[form_name]\n \n # By default we want people are are online and accepting buyouts\n buyout_control = self._browser.form.find_control(name=\"has_buyout\")\n online_control = self._browser.form.find_control(name=\"online\")\n buyout_control.value = [\"1\"]\n online_control.value = [\"x\"]\n \n search_response = self._browser.submit()\n return search_response.geturl()", "def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })", "def search(self, address='', url=True):\n baseurl = 'https://www.redfin.com/'\n try:\n self.driver.get(baseurl)\n if not address:\n print(f'---- testing {self.driver.current_url}')\n return None\n search_input = self.driver.find_element_by_xpath(\n '//input[@type=\"search\"]')\n search_input.send_keys(address)\n search_btn = self.driver.find_element_by_xpath(\n '//button[@data-rf-test-name=\"searchButton\"]')\n search_btn.click()\n self.driver.find_element_by_xpath(\n '//span[@itemprop=\"streetAddress\"]')\n result = self.driver.current_url\n self.detail_statu = True\n self.log.debug('---- Property page : %s', result)\n if url:\n return result\n except NoSuchElementException as e:\n self.log.info('---- No such element for : \"%s\"', address)\n return None\n except Exception as e:\n self.log.error('---- Search Error : %s', e)\n result = 'None'\n if url:\n return result", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", 
\"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def get_url(soup):\r\n \"\"\"criteria: any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')\"\"\"\r\n url_list = []\r\n for a in soup.find_all('a', {\"href\": re.compile(\"typeid=9$\")}):\r\n if any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')):\r\n url = \"https://www.cdc.gov.tw\" + a['href']\r\n url_list.append(url)\r\n return url_list", "def search(query_string):", "def search_query():\n g.form.process(request.form)\n\n if g.form.submit.data and g.form.search.data:\n query = g.form.search.data\n try:\n result = search.search_code(query)\n except search.NoPostcode:\n # Pass along to search results page to process\n return redirect(url_for(\".search_results\", query=query))\n\n if isinstance(result, models.StopPoint):\n return redirect(url_for(\".stop_atco\", atco_code=result.atco_code))\n elif isinstance(result, models.Postcode):\n return redirect(url_for(\".list_near_postcode\", code=result.text))\n else:\n return redirect(url_for(\".search_results\", query=query))\n else:\n return redirect(url_for(\".search_results\"))", "def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")", "def test_small_search_exists(self):\n\n search_html = 'agency search--box scrollable-dropdown-menu'\n\n response = self.client.get(reverse('learn'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('about'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('agencies'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developers'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developer'))\n self.assertContains(response, search_html)\n\n response = self.client.get(\n reverse(\n 'contact_landing',\n args=['department-of-commerce--census-bureau']))\n self.assertContains(response, search_html)", "def contain_url(self):\n url = self.url\n\n d_month_year = self.get_date_year_month(self.depart_date)\n d_day = self.get_date_day(self.depart_date)\n if self.return_date == '':\n # If no return date is entered,\n # the 'search_type' parameter\n # is set to 'OW' (One Way).\n search_type = 'OW'\n parameters = self.get_parameters_string(\n search_type, d_month_year, d_day)\n else:\n # If a return date is 
entered,\n # the 'search_type' parameter\n # is set to 'RT' (Round Trip).\n search_type = 'RT'\n r_month_year = self.get_date_year_month(self.return_date)\n r_day = self.get_date_day(self.return_date)\n parameters = self.get_parameters_string(\n search_type, d_month_year, d_day,\n r_month_year, r_day)\n url = url + parameters\n return url", "def parse_apartment_urls(self):\n\n # Generate soup for starting page\n soup = generate_soup(self.start_url)\n\n # Empties the urls list, in case it wasn't before\n self.apartment_urls = []\n\n # Get apartments in current page and store\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls\n\n # Check if there are more page to pull from\n while self.list_has_next_page(soup):\n soup = self.list_get_next_page(soup)\n\n # Get apartments in current page\n current_page_apartment_urls = self.list_get_apartment_urls(soup)\n self.apartment_urls = self.apartment_urls + current_page_apartment_urls", "def test_agencies_search_list(self):\n\n query = \"department\"\n response = self.client.get(reverse('agencies') + \"?query=\" + query)\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Department of Homeland Security' in content)\n self.assertTrue('Department of Commerce' in content)\n self.assertTrue('Patent and Trademark Office' not in content)", "def search_against_url(request, url):\n\n (scheme, _1, _2, _3, _4, _5) = urlparse(url)\n if scheme not in ('http', 'https'):\n return search_page(request, error='The URL must begin with either http or https.')\n\n sfm = from_django_conf('sidebyside')\n try:\n (title, text) = fetch_and_clean(url)\n except requests.exceptions.Timeout:\n return search_page(request, error=\"Sorry, that news article couldn't be retrieved.\")\n\n try:\n sfm_results = sfm.search(text=text, title=title, url=url)\n drop_silly_results(sfm_results)\n sort_by_coverage(sfm_results)\n\n\n #if they submit a url, don't return the exact same url in the results\n for r in sfm_results['documents']['rows']:\n if r.get('url') == url:\n sfm_results['documents']['rows'].remove(r)\n\n if sfm_results.has_key('text'): text = sfm_results['text']\n else: text = ''\n\n if sfm_results.has_key('title'): title = sfm_results['title']\n else: title='No Title'\n\n return search_result_page(request, sfm_results, text,\n source_title=title, source_url=url)\n except superfastmatch.SuperFastMatchError, e:\n if e.status == httplib.NOT_FOUND:\n raise HttpResponse('No such article {0}'.format(url))\n elif settings.DEBUG == True:\n return HttpResponse(e.response[1], status=e.response[0])\n else:\n raise", "def QAsearch():\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer',word=question))\n return render_template(\n 'QAsearch.html',\n title = 'QAsearch Page',\n year = datetime.now().year,\n form = form,\n question = question\n )", "def search(self, query):", "def test_get_search_page_url(self):\n\n keywords = [\"design\", \"desk\"]\n search_page_base_url = self.test_data[\"search_page_base_url\"]\n\n fragment = quote(\" \".join(keywords))\n url = urljoin(search_page_base_url, fragment)\n\n self.assertEqual(self.retriever._get_search_page_url(keywords, 1), url, \\\n msg = \"The search page URL is malformed\" )\n\n next_page_query = \"?\" + urlencode({\"page\" : str(2)})\n url += next_page_query\n\n 
self.assertEqual(self.retriever._get_search_page_url(keywords, 2), url, \\\n msg = \"The search page URL is malformed\" )", "def match_url(self, url):\n pass", "def test_search_form_main_urls(self):\n r_keys = ['price_max', 'price_min', 'rooms_count']\n r = self.check_request_keys(\"get\", \"search-forms/main/\", r_keys)\n\n self.assertIsInstance(r['price_min'], int)\n self.assertIsInstance(r['price_max'], int)\n self.check_list_items_type(r['rooms_count'], int)", "def search_antiques(request):\n query = request.GET.get('q')\n\n if query:\n results = Antiques.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))\n else:\n results = Antiques.objects.all()\n pages = pagination(request, results, num=4)\n\n context = {\n 'items': pages[0],\n 'page_range': pages[1],\n 'query': query,\n }\n\n return render(request, \"antiques.html\", context)", "def form_search_url(self):\r\n self.reformat_search_for_spaces()\r\n self.target_yt_search_url_str = self.prefix_of_search_url + self.yt_search_key + self.filter_url_portion", "def search_process():\n\n # processing search parameters common to each person\n open_now = request.args.get(\"open_now\")\n time = request.args.get(\"time\")\n limit = request.args.get(\"limit\")\n search_type = request.args.get(\"search-type\")\n\n # person 1's search parameters\n your_term = request.args.get(\"your_term\")\n your_latitude = float(request.args.get(\"your_latitude\"))\n your_longitude = float(request.args.get(\"your_longitude\"))\n your_radius = request.args.get(\"your_radius\")\n your_price = str(request.args.get(\"your_price\"))\n # person 2's search parameters\n friends_latitude = float(request.args.get(\"friends_latitude\"))\n friends_longitude = float(request.args.get(\"friends_longitude\"))\n friends_price = str(request.args.get(\"friends_price\"))\n\n # uses the Google Maps API to geocode and functions written in midpt_formula.py\n # to find the midpoint of the two given addresses\n # your_location = geocoding(st_address1, city1, state1)\n # friends_location = geocoding(st_address2, city2, state2)\n your_location = [your_latitude, your_longitude]\n friends_location = [friends_latitude, friends_longitude]\n mid_lat, mid_lng = midpt_formula(your_location, friends_location)\n\n if search_type == 'midpt':\n friends_term = request.args.get(\"friends_term\")\n friends_radius = request.args.get(\"friends_radius\")\n sort_by = request.args.get(\"sort_by\")\n # sort only works for midpt because of the sets used in venn diagram calculations\n\n params_midpt = {'term': avoid_term_duplicates(your_term, friends_term),\n 'latitude': mid_lat,\n 'longitude': mid_lng,\n 'radius': mi_to_m(stricter_radius(your_radius, friends_radius)),\n 'sort_by': sort_by,\n 'limit': limit,\n }\n\n if time:\n params_midpt['open_at'] = unix_time(time)\n elif open_now:\n params_midpt['open_now'] = open_now\n\n # results for Midpoint Formula calculation Yelp search\n responses = search_yelp(params_midpt)\n\n elif search_type == 'venn':\n # the dictionary of search parameters to submit to the Yelp API\n your_parameters = {'term': your_term,\n 'latitude': your_location[0],\n 'longitude': your_location[1],\n 'radius': mi_to_m(your_radius),\n }\n # import pdb; pdb.set_trace()\n distance_between_us = calculate_distance(tuple(your_location), tuple(friends_location))\n friends_parameters = {'term': your_term,\n 'latitude': friends_location[0],\n 'longitude': friends_location[1],\n 'radius': distance_between_us,\n }\n\n # adds the search parameter price if either user inputs a 
price\n if your_price or friends_price:\n your_parameters['price'] = avoid_price_duplicates(your_price, friends_price)\n friends_parameters['price'] = avoid_price_duplicates(your_price, friends_price)\n\n # adds the business hours parameter if they specify whether they would want\n # to go to the business now or at a future time\n if time:\n your_parameters['open_at'] = unix_time(time)\n friends_parameters['open_at'] = unix_time(time)\n elif open_now:\n your_parameters['open_now'] = open_now\n friends_parameters['open_now'] = open_now\n\n # results for Venn Diagram calculation: two separate Yelp searches for both\n your_search_results = search_yelp(your_parameters)\n friends_search_results = search_yelp(friends_parameters)\n\n\n # finding the results common to both and adding them to a dictionary\n responses = {'businesses': get_common_restaurants(your_search_results, friends_search_results)}\n\n responses['your_location'] = your_location\n responses['friends_location'] = friends_location\n return jsonify(responses)\n\n # sends the locations of each person for creating markers on the map\n\n # do a for loop for when I get more than 2 people meeting up", "def search_current_auctions(request):\n query = request.GET.get('q')\n auction = Auction.objects.all()\n\n if query:\n results = auction.filter(Q(antiques__name__icontains=query) | Q(antiques__description__icontains=query))\n\n else:\n results = Auction.objects.all()\n\n pages = pagination(request, results, num=4)\n context = {\n 'items': pages[0],\n 'page_range': pages[1]\n }\n\n return render(request, \"showallauctions.html\", context)", "def check_url_format(self):\r\n m = re.match(\"^http://www.tesco.com/direct/[0-9a-zA-Z-]+/[0-9-]+\\.prd$\", self.product_page_url)\r\n n = re.match(\"^http://www.tesco.com/.*$\", self.product_page_url)\r\n return (not not m) or (not not n)" ]
[ "0.6154764", "0.6144359", "0.6003847", "0.5999131", "0.5904226", "0.5864869", "0.5856176", "0.5819513", "0.5761393", "0.5717995", "0.5688156", "0.5688023", "0.5634919", "0.5621996", "0.5603309", "0.5553236", "0.5548065", "0.5510001", "0.5509159", "0.5471066", "0.54406667", "0.5438181", "0.5436965", "0.54335934", "0.5427564", "0.5406087", "0.5395467", "0.5384261", "0.5372792", "0.53695387" ]
0.6176141
0
Check complexes search form url
def test_search_form_complexes_urls(self):
    r_keys = ['balcony_types', 'bathroom_type', 'building_floors_max', 'building_floors_min',
              'building_type', 'decoration', 'elevators_type', 'floor_max', 'floor_min',
              'infrastructure', 'living_area_max', 'living_area_min', 'metro_stations',
              'price_per_m_max', 'price_per_m_min', 'regions', 'rooms_count',
              'term_gc_max', 'term_gc_min', 'total_area_max', 'total_area_min']
    r = self.check_request_keys("get", "search-forms/complexes/", r_keys)
    self.check_list_item_keys(r["balcony_types"], ['id', 'name'])
    self.check_list_item_keys(r["bathroom_type"], ['id', 'name'])
    self.assertIsInstance(r['building_floors_max'], int)
    self.assertIsInstance(r['building_floors_min'], int)
    self.check_list_item_keys(r["building_type"], ['id', 'name'])
    self.assertIsInstance(r['decoration'], list)
    self.assertEqual(r['decoration'], [])
    self.check_list_item_keys(r["elevators_type"], ['id', 'name'])
    self.assertIsInstance(r['floor_max'], int)
    self.assertIsInstance(r['floor_min'], int)
    self.assertIsInstance(r['infrastructure'], list)
    self.assertEqual(r['infrastructure'], [])
    self.assertIsInstance(r['living_area_max'], int)
    self.assertIsInstance(r['living_area_min'], int)
    self.check_list_item_keys(r["metro_stations"], ['id', 'name'])
    self.assertIsInstance(r['price_per_m_max'], int)
    self.assertIsInstance(r['price_per_m_min'], int)
    self.check_list_item_keys(r["regions"], ['format', 'id', 'locations', 'name', 'slug',
                                             'typeBeforeLocation', 'typeName',
                                             'typePrepositionalShortName', 'typeShortName'])
    self.check_list_items_type(r['rooms_count'], int)
    self.assertIsInstance(r['term_gc_max'], int)
    self.assertIsInstance(r['term_gc_min'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(url):", "def test_filter_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'nutella',\r\n 'category': '1',\r\n 'nutriscore': 'd'\r\n })\r\n self.assertTrue(response.context['product_list'])", "def search(request):\n raise NotImplementedError", "def test_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'product'\r\n })\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def test_small_search_exists(self):\n\n search_html = 'agency search--box scrollable-dropdown-menu'\n\n response = self.client.get(reverse('learn'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('about'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('agencies'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developers'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developer'))\n self.assertContains(response, search_html)\n\n response = self.client.get(\n reverse(\n 'contact_landing',\n args=['department-of-commerce--census-bureau']))\n self.assertContains(response, search_html)", "def test_search_form_main_urls(self):\n r_keys = ['price_max', 'price_min', 'rooms_count']\n r = self.check_request_keys(\"get\", \"search-forms/main/\", r_keys)\n\n self.assertIsInstance(r['price_min'], int)\n self.assertIsInstance(r['price_max'], int)\n self.check_list_items_type(r['rooms_count'], int)", "def test_study_source_get_search_url_response(self):\n this_study = factories.StudyFactory.create()\n url = this_study.get_search_url()\n response = self.client.get(url)\n # url should work\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.context['form'], forms.SourceTraitSearchForm)", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n results.append(title)\n if results:\n substring = True\n # return results\n 
return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })", "def search(query_string):", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = 
None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? 
Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)", "def validate_url(self):\n pass", "def search(request):\n\n if request.method == \"POST\":\n form = SearchForm(request.POST)\n\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n entryMD = util.get_entry(title)\n\n print('search request: ', title)\n\n if entryMD:\n return redirect(reverse('entry', args=[title]))\n else:\n relatedTitles = util.relatedTitles(title)\n\n return render(request, \"encyclopedia/search.html\", {\n \"title\": title,\n \"relatedTitles\": relatedTitles,\n \"searchForm\": SearchForm()\n })\n return redirect(reverse('index'))", "def match_url(self, url):\n pass", "def search(self, query):", "def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")", "def __url_filter(self, model, iter, user_data):\n\t\tpattern = dict_filter[self.combobox2.get_model()[self.combobox2.get_active()][0]]\n\t\treturn pattern in str(model.get_value(iter, 0))", "def get_query_url(self, search_args):\n self._browser.open(\"http://poe.trade/\")\n # There are two forms, the second one is the search form\n # Both forms don't have names so we just know the 2nd one is the right one\n self._browser.form = list(self._browser.forms())[1]\n \n # Populate the forms with the stuff we want\n for form_name in search_args:\n control = self._browser.form.find_control(form_name)\n control.value = search_args[form_name]\n \n # By default we want people are are online and accepting buyouts\n buyout_control = self._browser.form.find_control(name=\"has_buyout\")\n online_control = self._browser.form.find_control(name=\"online\")\n buyout_control.value = [\"1\"]\n online_control.value = [\"x\"]\n \n search_response = self._browser.submit()\n return search_response.geturl()", "def API_company(request):\n query = request.GET\n if any(key for key in query 
if key not in API_COMPANY_VALIDKEYS):\n #print([(key,key not in API_COMPANY_VALIDKEYS) for key in query])\n return django.http.HttpResponseBadRequest(\"Invalid query\")\n if \"search\" in query:\n return API_companysearch(request)\n elif \"po\" in query:\n return API_companypo(request)\n return django.http.Http404()", "def form_search_url(self):\r\n self.reformat_search_for_spaces()\r\n self.target_yt_search_url_str = self.prefix_of_search_url + self.yt_search_key + self.filter_url_portion", "def search_form_servee(context, cl):\r\n return {\r\n \"request\": context[\"request\"],\r\n \"cl\": cl,\r\n \"show_result_count\": cl.result_count != cl.full_result_count,\r\n \"search_var\": \"q\"\r\n }", "def test_search_form_apartments_urls(self):\n r_keys = ['balcony_types', 'bathroom_type', 'building_floors_max',\n 'building_floors_min', 'building_type', 'decoration',\n 'elevators_type', 'floor_max', 'floor_min', 'infrastructure',\n 'living_area_max', 'living_area_min', 'metro_stations',\n 'price_per_m_max', 'price_per_m_min', 'regions', 'rooms_count',\n 'total_area_max', 'total_area_min']\n r = self.check_request_keys(\"get\", \"search-forms/apartments/\", r_keys)\n\n self.check_list_item_keys(r[\"balcony_types\"], ['id', 'name'])\n self.check_list_item_keys(r[\"bathroom_type\"], ['id', 'name'])\n self.assertIsInstance(r['building_floors_max'], int)\n self.assertIsInstance(r['building_floors_min'], int)\n self.check_list_item_keys(r[\"building_type\"], ['id', 'name'])\n self.assertIsInstance(r['decoration'], list)\n self.assertEqual(r['decoration'], [])\n self.check_list_item_keys(r[\"elevators_type\"], ['id', 'name'])\n self.assertIsInstance(r['floor_max'], int)\n self.assertIsInstance(r['floor_min'], int)\n self.assertIsInstance(r['infrastructure'], list)\n self.assertEqual(r['infrastructure'], [])\n self.assertIsInstance(r['living_area_max'], int)\n self.assertIsInstance(r['living_area_min'], int)\n self.check_list_item_keys(r[\"metro_stations\"], ['id', 'name'])\n self.assertIsInstance(r['price_per_m_max'], int)\n self.assertIsInstance(r['price_per_m_min'], int)\n self.check_list_item_keys(r[\"regions\"], ['format', 'id', 'locations', 'name', 'slug', 'typeBeforeLocation',\n 'typeName', 'typePrepositionalShortName', 'typeShortName'])\n self.check_list_items_type(r['rooms_count'], int)\n self.assertIsInstance(r['total_area_max'], int)\n self.assertIsInstance(r['total_area_min'], int)", "def search_against_url(request, url):\n\n (scheme, _1, _2, _3, _4, _5) = urlparse(url)\n if scheme not in ('http', 'https'):\n return search_page(request, error='The URL must begin with either http or https.')\n\n sfm = from_django_conf('sidebyside')\n try:\n (title, text) = fetch_and_clean(url)\n except requests.exceptions.Timeout:\n return search_page(request, error=\"Sorry, that news article couldn't be retrieved.\")\n\n try:\n sfm_results = sfm.search(text=text, title=title, url=url)\n drop_silly_results(sfm_results)\n sort_by_coverage(sfm_results)\n\n\n #if they submit a url, don't return the exact same url in the results\n for r in sfm_results['documents']['rows']:\n if r.get('url') == url:\n sfm_results['documents']['rows'].remove(r)\n\n if sfm_results.has_key('text'): text = sfm_results['text']\n else: text = ''\n\n if sfm_results.has_key('title'): title = sfm_results['title']\n else: title='No Title'\n\n return search_result_page(request, sfm_results, text,\n source_title=title, source_url=url)\n except superfastmatch.SuperFastMatchError, e:\n if e.status == httplib.NOT_FOUND:\n raise HttpResponse('No 
such article {0}'.format(url))\n elif settings.DEBUG == True:\n return HttpResponse(e.response[1], status=e.response[0])\n else:\n raise", "def hyperlink_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif 'UniProtKB Accession' in request.GET and request.GET['UniProtKB Accession'] or \\\n\t'Protein' in request.GET and request.GET['Protein'] or \\\n\t'Gene' in request.GET and request.GET['Gene'] or \\\n\t'Organism' in request.GET and request.GET['Organism'] or \\\n\t'Organismid' in request.GET and request.GET['Organismid'] or \\\n\t'SubCellular' in request.GET and request.GET['SubCellular'] or \\\n\t'Peptide Sequence' in request.GET and request.GET['Peptide Sequence'] or \\\n\t'Pathway Name' in request.GET and request.GET['Pathway Name'] or \\\n\t'Disease Name' in request.GET and request.GET['Disease Name'] or \\\n\t'Go ID' in request.GET and request.GET['Go ID'] or \\\n\t'Go Name' in request.GET and request.GET['Go Name'] or \\\n\t'Go Term' in request.GET and request.GET['Go Term'] or \\\n\t'AssayFdaApproveMark' in request.GET and request.GET['AssayFdaApproveMark']:\n\t\tuseruniprotkb =\"\"\n\t\tuserprotein =\"\"\n\t\tusergeneid =\"\"\n\t\tuserorg=\"\"\n\t\tuserorgid=\"\"\n\t\tusersubcell =\"\"\n\t\tuserpepseq =\"\"\n\t\tuserpathway =\"\"\n\t\tuserdis =\"\"\n\t\tusergoid =\"\"\n\t\tusergotn =\"\"\n\t\tusergot=\"\"\n\t\tuserassayfdaapprovemark=\"\"\n\t\tfinalsearhdata=''\n\t\ttry:\n\t\t\tuseruniprotkb = request.GET[\"UniProtKB Accession\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in useruniprotkb:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('|')\n\t\telse:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('\\\\n')\n\t\tuseruniprotkb=[(item.strip()).lower() for item in useruniprotkb]\n\t\tuseruniprotkb=map(str, useruniprotkb)\n\t\tuseruniprotkb=filter(None, useruniprotkb)\n\n\t\ttry:\n\t\t\tuserprotein = request.GET[\"Protein\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userprotein:\n\t\t\tuserprotein=(userprotein.strip()).split('|')\n\t\telse:\n\t\t\tuserprotein=(userprotein.strip()).split('\\\\n')\n\t\tuserprotein=[(item.strip()).lower() for item in userprotein]\n\t\tuserprotein=map(str, userprotein)\n\t\tuserprotein=filter(None, userprotein)\n\n\t\ttry:\n\t\t\tusergeneid = request.GET[\"Gene\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergeneid:\n\t\t\tusergeneid=(usergeneid.strip()).split('|')\n\t\telse:\n\t\t\tusergeneid=(usergeneid.strip()).split('\\\\n')\n\t\tusergeneid=[(item.strip()).lower() for item in usergeneid]\n\t\tusergeneid=map(str, usergeneid)\n\t\tusergeneid=filter(None, usergeneid)\n\n\t\ttry:\n\t\t\tuserorg = request.GET[\"Organism\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorg:\n\t\t\tuserorg=(userorg.strip()).split('|')\n\t\telse:\n\t\t\tuserorg=(userorg.strip()).split('\\\\n')\n\t\tuserorg=[(item.strip()).lower() for item in userorg]\n\t\tuserorg=map(str, userorg)\n\t\tuserorg=filter(None, userorg)\n\n\t\ttry:\n\t\t\tuserorgid = request.GET[\"Organismid\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorgid:\n\t\t\tuserorgid=(userorgid.strip()).split('|')\n\t\telse:\n\t\t\tuserorgid=(userorgid.strip()).split('\\\\n')\n\t\tuserorgid=[(item.strip()).lower() for item in userorgid]\n\t\tuserorgid=map(str, userorgid)\n\t\tuserorgid=filter(None, userorgid)\n\n\t\ttry:\n\t\t\tusersubcell = request.GET[\"SubCellular\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in 
usersubcell:\n\t\t\tusersubcell=(usersubcell.strip()).split('|')\n\t\telse:\n\t\t\tusersubcell=(usersubcell.strip()).split('\\\\n')\n\t\tusersubcell=[(item.strip()).lower() for item in usersubcell]\n\t\tusersubcell=map(str, usersubcell)\n\t\tusersubcell=filter(None, usersubcell)\n\n\t\ttry:\n\t\t\tuserpepseq = request.GET[\"Peptide Sequence\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpepseq:\n\t\t\tuserpepseq=(userpepseq.strip()).split('|')\n\t\telse:\n\t\t\tuserpepseq=(userpepseq.strip()).split('\\\\n')\n\t\tuserpepseq=[(item.strip()).lower() for item in userpepseq]\n\t\tuserpepseq=map(str, userpepseq)\n\t\tuserpepseq=filter(None, userpepseq)\n\n\t\ttry:\n\t\t\tuserpathway = request.GET[\"Pathway Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpathway:\n\t\t\tuserpathway=(userpathway.strip()).split('|')\n\t\telse:\n\t\t\tuserpathway=(userpathway.strip()).split('\\\\n')\n\t\tuserpathway=[(item.strip()).lower() for item in userpathway]\n\t\tuserpathway=map(str, userpathway)\n\t\tuserpathway=filter(None, userpathway)\n\n\t\ttry:\n\t\t\tuserdis = request.GET[\"Disease Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userdis:\n\t\t\tuserdis=(userdis.strip()).split('|')\n\t\telse:\n\t\t\tuserdis=(userdis.strip()).split('\\\\n')\n\t\tuserdis=[(item.strip()).lower() for item in userdis]\n\t\tuserdis=map(str, userdis)\n\t\tuserdis=filter(None, userdis)\n\n\t\ttry:\n\t\t\tusergoid = request.GET[\"Go ID\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergoid:\n\t\t\tusergoid=(usergoid.strip()).split('|')\n\t\telse:\n\t\t\tusergoid=(usergoid.strip()).split('\\\\n')\n\t\tusergoid=[(item.strip()).lower() for item in usergoid]\n\t\tusergoid=map(str, usergoid)\n\t\tusergoid=filter(None, usergoid)\n\n\t\ttry:\n\t\t\tusergotn = request.GET[\"Go Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergotn:\n\t\t\tusergotn=(usergotn.strip()).split('|')\n\t\telse:\n\t\t\tusergotn=(usergotn.strip()).split('\\\\n')\n\t\tusergotn=[(item.strip()).lower() for item in usergotn]\n\t\tusergotn=map(str, usergotn)\n\t\tusergotn=filter(None, usergotn)\n\n\t\ttry:\n\t\t\tusergot = request.GET[\"Go Term\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergot:\n\t\t\tusergot=(usergot.strip()).split('|')\n\t\telse:\n\t\t\tusergot=(usergot.strip()).split('\\\\n')\n\t\tusergot=[(item.strip()).lower() for item in usergot]\n\t\tusergot=map(str, usergot)\n\t\tusergot=filter(None, usergot)\n\n\t\ttry:\n\t\t\tuserassayfdaapprovemark = request.GET[\"AssayFdaApproveMark\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userassayfdaapprovemark:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('|')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\telse:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('\\\\n')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\tuserassayfdaapprovemark=[(item.strip()).lower() for item in userassayfdaapprovemark]\n\t\tuserassayfdaapprovemark=map(str, userassayfdaapprovemark)\n\t\tuserassayfdaapprovemark=filter(None, userassayfdaapprovemark)\n\n\t\tspquerylist =[]\n\t\tsearchtermlist=[]\n\n\t\tif len(useruniprotkb) >0:\n\t\t\tfinalsearhdata+='UniProtKB Accession:'+';'.join(useruniprotkb)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
useruniprotkb:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"UniProtKB Accession.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userprotein)> 0:\n\t\t\tfinalsearhdata+='Protein:'+';'.join(userprotein)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userprotein:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Protein.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergeneid) >0:\n\t\t\tfinalsearhdata+='Gene:'+';'.join(usergeneid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergeneid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Gene.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorg) > 0:\n\t\t\tfinalsearhdata+='Organism:'+';'.join(userorg)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorg:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorgid) > 0:\n\t\t\tfinalsearhdata+='Organism ID:'+';'.join(userorgid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorgid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usersubcell) >0:\n\t\t\tfinalsearhdata+='SubCellular:'+';'.join(usersubcell)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usersubcell:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"SubCellular.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpepseq) >0:\n\t\t\tfinalsearhdata+='Peptide Sequence:'+';'.join(userpepseq)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
userpepseq:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpathway) >0:\n\t\t\tfinalsearhdata+='Pathway Name:'+';'.join(userpathway)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userpathway:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Pathway Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userdis) >0:\n\t\t\tfinalsearhdata+='Disease Name:'+';'.join(userdis)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userdis:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Disease Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergoid) >0:\n\t\t\tfinalsearhdata+='Go ID:'+';'.join(usergoid)+' '\n\t\t\tsdict={}\n\t\t\tsdict[\"Go ID.ngram\"]=[i.split(' ')[0] for i in usergoid]\n\t\t\ttdict={}\n\t\t\ttdict[\"terms\"]=sdict\n\t\t\tsearchtermlist.append(tdict)\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergoid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]+={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergotn) >0:\n\t\t\tfinalsearhdata+='Go Name:'+';'.join(usergotn)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergotn:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergot) > 0:\n\t\t\tfinalsearhdata+='Go Term:'+';'.join(usergot)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergot:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Term.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(userassayfdaapprovemark) > 
0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker::'+';'.join(userassayfdaapprovemark)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userassayfdaapprovemark:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(searchtermlist)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnameFIle=names.get_first_name()\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\t\t\t\t\t\n\t\t\t\t\tjfinaldata.append(jdic)\n\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata)\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1]))\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items())[:10])\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries']))\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', 
jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dumps(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tcontextindex={\n\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t}\n\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn render(request,'resultform.html',{'foundHits':foundHits})", "def test_get_search_page_url(self):\n\n keywords = [\"design\", \"desk\"]\n search_page_base_url = self.test_data[\"search_page_base_url\"]\n\n fragment = quote(\" \".join(keywords))\n url = urljoin(search_page_base_url, fragment)\n\n self.assertEqual(self.retriever._get_search_page_url(keywords, 1), url, \\\n msg = \"The search page URL is malformed\" )\n\n next_page_query = \"?\" + urlencode({\"page\" : str(2)})\n url += next_page_query\n\n self.assertEqual(self.retriever._get_search_page_url(keywords, 2), url, \\\n msg = \"The search page URL is malformed\" )", "def genSearch(request):\n \n assert isinstance(request, HttpRequest)\n booklist=[]\n form = request.GET.copy();\n searchvalue =form['query']\n for k,v in get_valid_Books().items():\n if searchvalue.lower() in v.title.lower() or searchvalue.lower() in v.desc.lower() or searchvalue.lower() in v.a_id.name.lower():\n booklist.append(v)\n if booklist is None:\n clearfilter=\"False\"\n else:\n clearfilter=\"True\"\n\n return render(\n request,\n 'app/about.html',\n {\n 'title':'Books',\n 'books':booklist,\n 'clearfilter':clearfilter,\n 'year':datetime.now().year,\n }\n )", "def test_search_form(self):\n set_up_one_user(self, 1, 1)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.post(reverse('index'), {'terms_en': 'Test Search', 'websites': [self.website.pk]})\n s = Search.objects.filter(terms_en=\"Test Search\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(s), 1)", "def search(self, *args, **kwargs):", "def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })" ]
[ "0.62470764", "0.61839455", "0.61307776", "0.60680735", "0.60604334", "0.6051853", "0.5925678", "0.5914352", "0.59062356", "0.588627", "0.5855097", "0.58526474", "0.5848683", "0.5813221", "0.57736385", "0.5726085", "0.57079494", "0.5657176", "0.56479686", "0.5645858", "0.56225014", "0.55809236", "0.55767167", "0.55698436", "0.5538656", "0.5533891", "0.5530331", "0.55097693", "0.55058414", "0.55043757" ]
0.6926931
0
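The three fields that close each record (negative_scores, document_score, document_rank) appear to describe how the positive document compares against the mined negatives: in the record above the positive scores 0.6926931, every negative scores at or below 0.62470764, and the rank is 0. A minimal sketch of that apparent relationship, assuming the rank counts how many negatives outscore the positive — an inference from the values shown here, not documentation of the dataset schema:

```python
# Hypothetical helper illustrating the apparent meaning of document_rank:
# the number of negatives whose score exceeds the positive document's score.
# The field semantics are inferred from the record above, not from any spec.
def infer_document_rank(document_score: float, negative_scores: list[str]) -> int:
    return sum(float(s) > document_score for s in negative_scores)

# For the record above no negative exceeds 0.6926931, so the inferred rank is 0,
# which matches the stored document_rank.
print(infer_document_rank(0.6926931, ["0.62470764", "0.61839455", "0.61307776"]))
```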
Check main search form url
def test_search_form_main_urls(self): r_keys = ['price_max', 'price_min', 'rooms_count'] r = self.check_request_keys("get", "search-forms/main/", r_keys) self.assertIsInstance(r['price_min'], int) self.assertIsInstance(r['price_max'], int) self.check_list_items_type(r['rooms_count'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(request):\n\n if request.method == \"POST\":\n form = SearchForm(request.POST)\n\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n entryMD = util.get_entry(title)\n\n print('search request: ', title)\n\n if entryMD:\n return redirect(reverse('entry', args=[title]))\n else:\n relatedTitles = util.relatedTitles(title)\n\n return render(request, \"encyclopedia/search.html\", {\n \"title\": title,\n \"relatedTitles\": relatedTitles,\n \"searchForm\": SearchForm()\n })\n return redirect(reverse('index'))", "def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n results.append(title)\n if results:\n substring = True\n # return results\n return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })", "def get_query_url(self, search_args):\n self._browser.open(\"http://poe.trade/\")\n # There are two forms, the second one is the search form\n # Both forms don't have names so we just know the 2nd one is the right one\n self._browser.form = list(self._browser.forms())[1]\n \n # Populate the forms with the stuff we want\n for form_name in search_args:\n control = self._browser.form.find_control(form_name)\n control.value = search_args[form_name]\n \n # By default we want people are are online and accepting buyouts\n buyout_control = self._browser.form.find_control(name=\"has_buyout\")\n online_control = self._browser.form.find_control(name=\"online\")\n buyout_control.value = [\"1\"]\n online_control.value = [\"x\"]\n \n search_response = self._browser.submit()\n return search_response.geturl()", "def form_search_url(self):\r\n self.reformat_search_for_spaces()\r\n self.target_yt_search_url_str = self.prefix_of_search_url + self.yt_search_key + self.filter_url_portion", "def search(request):\n raise NotImplementedError", "def searchForm(self, search=None, replace=None):\n if not search or not replace:\n raise cherrypy.HTTPError(400, \"Bad request\")\n redirurl = \"/{}/{}/\".format(search, replace)\n raise cherrypy.HTTPRedirect(redirurl)", "def search_query():\n g.form.process(request.form)\n\n if g.form.submit.data and g.form.search.data:\n query = g.form.search.data\n try:\n result = search.search_code(query)\n except search.NoPostcode:\n # Pass along to search results page to process\n return redirect(url_for(\".search_results\", query=query))\n\n if isinstance(result, models.StopPoint):\n return redirect(url_for(\".stop_atco\", atco_code=result.atco_code))\n elif isinstance(result, models.Postcode):\n return redirect(url_for(\".list_near_postcode\", code=result.text))\n else:\n return redirect(url_for(\".search_results\", query=query))\n else:\n return redirect(url_for(\".search_results\"))", "def test_small_search_exists(self):\n\n search_html = 'agency search--box scrollable-dropdown-menu'\n\n response = self.client.get(reverse('learn'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('about'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('agencies'))\n self.assertContains(response, search_html)\n\n response = 
self.client.get(reverse('developers'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developer'))\n self.assertContains(response, search_html)\n\n response = self.client.get(\n reverse(\n 'contact_landing',\n args=['department-of-commerce--census-bureau']))\n self.assertContains(response, search_html)", "def isSearchRequest(self):\n return re.search('Search.+Request', self.name) is not None", "def test_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'product'\r\n })\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def check_searchin(self):\r\n\r\n self.limit_panel_toggle()\r\n\r\n pth = self.m_searchin_text.GetValue()\r\n if not self.searchin_update:\r\n if isdir(pth):\r\n self.m_searchin_dir_picker.SetPath(pth)\r\n elif isfile(pth):\r\n self.m_searchin_dir_picker.SetPath(dirname(pth))\r\n self.searchin_update = False", "def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)", "def validate_url(self):\n pass", "def QAsearch():\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer',word=question))\n return render_template(\n 'QAsearch.html',\n title = 'QAsearch Page',\n year = datetime.now().year,\n form = form,\n question = question\n )", "def get_search_url(free_text_search):\n url = baseUrl + \"data/\"\n if not free_text_search:\n url += \"warehouse/\"\n url += \"search?\"\n return url", "def post(self):\n query = self.request.get('search')\n if query:\n self.redirect('/searchdemo/charlie?' 
+ urllib.urlencode(\n #{'query': query}))\n {'query': query.encode('utf-8')}))\n else:\n self.redirect('/searchdemo/charlie/')", "def match_url(self, url):\n pass", "def test_01_search(self):\r\n res = self.app.get('/search')\r\n err_msg = \"Search page should be accessible\"\r\n assert \"Search\" in res.data, err_msg", "def hyperlink_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif 'UniProtKB Accession' in request.GET and request.GET['UniProtKB Accession'] or \\\n\t'Protein' in request.GET and request.GET['Protein'] or \\\n\t'Gene' in request.GET and request.GET['Gene'] or \\\n\t'Organism' in request.GET and request.GET['Organism'] or \\\n\t'Organismid' in request.GET and request.GET['Organismid'] or \\\n\t'SubCellular' in request.GET and request.GET['SubCellular'] or \\\n\t'Peptide Sequence' in request.GET and request.GET['Peptide Sequence'] or \\\n\t'Pathway Name' in request.GET and request.GET['Pathway Name'] or \\\n\t'Disease Name' in request.GET and request.GET['Disease Name'] or \\\n\t'Go ID' in request.GET and request.GET['Go ID'] or \\\n\t'Go Name' in request.GET and request.GET['Go Name'] or \\\n\t'Go Term' in request.GET and request.GET['Go Term'] or \\\n\t'AssayFdaApproveMark' in request.GET and request.GET['AssayFdaApproveMark']:\n\t\tuseruniprotkb =\"\"\n\t\tuserprotein =\"\"\n\t\tusergeneid =\"\"\n\t\tuserorg=\"\"\n\t\tuserorgid=\"\"\n\t\tusersubcell =\"\"\n\t\tuserpepseq =\"\"\n\t\tuserpathway =\"\"\n\t\tuserdis =\"\"\n\t\tusergoid =\"\"\n\t\tusergotn =\"\"\n\t\tusergot=\"\"\n\t\tuserassayfdaapprovemark=\"\"\n\t\tfinalsearhdata=''\n\t\ttry:\n\t\t\tuseruniprotkb = request.GET[\"UniProtKB Accession\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in useruniprotkb:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('|')\n\t\telse:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('\\\\n')\n\t\tuseruniprotkb=[(item.strip()).lower() for item in useruniprotkb]\n\t\tuseruniprotkb=map(str, useruniprotkb)\n\t\tuseruniprotkb=filter(None, useruniprotkb)\n\n\t\ttry:\n\t\t\tuserprotein = request.GET[\"Protein\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userprotein:\n\t\t\tuserprotein=(userprotein.strip()).split('|')\n\t\telse:\n\t\t\tuserprotein=(userprotein.strip()).split('\\\\n')\n\t\tuserprotein=[(item.strip()).lower() for item in userprotein]\n\t\tuserprotein=map(str, userprotein)\n\t\tuserprotein=filter(None, userprotein)\n\n\t\ttry:\n\t\t\tusergeneid = request.GET[\"Gene\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergeneid:\n\t\t\tusergeneid=(usergeneid.strip()).split('|')\n\t\telse:\n\t\t\tusergeneid=(usergeneid.strip()).split('\\\\n')\n\t\tusergeneid=[(item.strip()).lower() for item in usergeneid]\n\t\tusergeneid=map(str, usergeneid)\n\t\tusergeneid=filter(None, usergeneid)\n\n\t\ttry:\n\t\t\tuserorg = request.GET[\"Organism\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorg:\n\t\t\tuserorg=(userorg.strip()).split('|')\n\t\telse:\n\t\t\tuserorg=(userorg.strip()).split('\\\\n')\n\t\tuserorg=[(item.strip()).lower() for item in userorg]\n\t\tuserorg=map(str, userorg)\n\t\tuserorg=filter(None, userorg)\n\n\t\ttry:\n\t\t\tuserorgid = request.GET[\"Organismid\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorgid:\n\t\t\tuserorgid=(userorgid.strip()).split('|')\n\t\telse:\n\t\t\tuserorgid=(userorgid.strip()).split('\\\\n')\n\t\tuserorgid=[(item.strip()).lower() for item in userorgid]\n\t\tuserorgid=map(str, 
userorgid)\n\t\tuserorgid=filter(None, userorgid)\n\n\t\ttry:\n\t\t\tusersubcell = request.GET[\"SubCellular\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usersubcell:\n\t\t\tusersubcell=(usersubcell.strip()).split('|')\n\t\telse:\n\t\t\tusersubcell=(usersubcell.strip()).split('\\\\n')\n\t\tusersubcell=[(item.strip()).lower() for item in usersubcell]\n\t\tusersubcell=map(str, usersubcell)\n\t\tusersubcell=filter(None, usersubcell)\n\n\t\ttry:\n\t\t\tuserpepseq = request.GET[\"Peptide Sequence\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpepseq:\n\t\t\tuserpepseq=(userpepseq.strip()).split('|')\n\t\telse:\n\t\t\tuserpepseq=(userpepseq.strip()).split('\\\\n')\n\t\tuserpepseq=[(item.strip()).lower() for item in userpepseq]\n\t\tuserpepseq=map(str, userpepseq)\n\t\tuserpepseq=filter(None, userpepseq)\n\n\t\ttry:\n\t\t\tuserpathway = request.GET[\"Pathway Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpathway:\n\t\t\tuserpathway=(userpathway.strip()).split('|')\n\t\telse:\n\t\t\tuserpathway=(userpathway.strip()).split('\\\\n')\n\t\tuserpathway=[(item.strip()).lower() for item in userpathway]\n\t\tuserpathway=map(str, userpathway)\n\t\tuserpathway=filter(None, userpathway)\n\n\t\ttry:\n\t\t\tuserdis = request.GET[\"Disease Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userdis:\n\t\t\tuserdis=(userdis.strip()).split('|')\n\t\telse:\n\t\t\tuserdis=(userdis.strip()).split('\\\\n')\n\t\tuserdis=[(item.strip()).lower() for item in userdis]\n\t\tuserdis=map(str, userdis)\n\t\tuserdis=filter(None, userdis)\n\n\t\ttry:\n\t\t\tusergoid = request.GET[\"Go ID\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergoid:\n\t\t\tusergoid=(usergoid.strip()).split('|')\n\t\telse:\n\t\t\tusergoid=(usergoid.strip()).split('\\\\n')\n\t\tusergoid=[(item.strip()).lower() for item in usergoid]\n\t\tusergoid=map(str, usergoid)\n\t\tusergoid=filter(None, usergoid)\n\n\t\ttry:\n\t\t\tusergotn = request.GET[\"Go Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergotn:\n\t\t\tusergotn=(usergotn.strip()).split('|')\n\t\telse:\n\t\t\tusergotn=(usergotn.strip()).split('\\\\n')\n\t\tusergotn=[(item.strip()).lower() for item in usergotn]\n\t\tusergotn=map(str, usergotn)\n\t\tusergotn=filter(None, usergotn)\n\n\t\ttry:\n\t\t\tusergot = request.GET[\"Go Term\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergot:\n\t\t\tusergot=(usergot.strip()).split('|')\n\t\telse:\n\t\t\tusergot=(usergot.strip()).split('\\\\n')\n\t\tusergot=[(item.strip()).lower() for item in usergot]\n\t\tusergot=map(str, usergot)\n\t\tusergot=filter(None, usergot)\n\n\t\ttry:\n\t\t\tuserassayfdaapprovemark = request.GET[\"AssayFdaApproveMark\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userassayfdaapprovemark:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('|')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\telse:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('\\\\n')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\tuserassayfdaapprovemark=[(item.strip()).lower() for item in userassayfdaapprovemark]\n\t\tuserassayfdaapprovemark=map(str, userassayfdaapprovemark)\n\t\tuserassayfdaapprovemark=filter(None, userassayfdaapprovemark)\n\n\t\tspquerylist =[]\n\t\tsearchtermlist=[]\n\n\t\tif len(useruniprotkb) >0:\n\t\t\tfinalsearhdata+='UniProtKB Accession:'+';'.join(useruniprotkb)+' 
'\n\t\t\tshouldlist=[]\n\t\t\tfor x in useruniprotkb:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"UniProtKB Accession.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userprotein)> 0:\n\t\t\tfinalsearhdata+='Protein:'+';'.join(userprotein)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userprotein:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Protein.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergeneid) >0:\n\t\t\tfinalsearhdata+='Gene:'+';'.join(usergeneid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergeneid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Gene.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorg) > 0:\n\t\t\tfinalsearhdata+='Organism:'+';'.join(userorg)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorg:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorgid) > 0:\n\t\t\tfinalsearhdata+='Organism ID:'+';'.join(userorgid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorgid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usersubcell) >0:\n\t\t\tfinalsearhdata+='SubCellular:'+';'.join(usersubcell)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usersubcell:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"SubCellular.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpepseq) >0:\n\t\t\tfinalsearhdata+='Peptide Sequence:'+';'.join(userpepseq)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
userpepseq:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpathway) >0:\n\t\t\tfinalsearhdata+='Pathway Name:'+';'.join(userpathway)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userpathway:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Pathway Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userdis) >0:\n\t\t\tfinalsearhdata+='Disease Name:'+';'.join(userdis)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userdis:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Disease Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergoid) >0:\n\t\t\tfinalsearhdata+='Go ID:'+';'.join(usergoid)+' '\n\t\t\tsdict={}\n\t\t\tsdict[\"Go ID.ngram\"]=[i.split(' ')[0] for i in usergoid]\n\t\t\ttdict={}\n\t\t\ttdict[\"terms\"]=sdict\n\t\t\tsearchtermlist.append(tdict)\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergoid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]+={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergotn) >0:\n\t\t\tfinalsearhdata+='Go Name:'+';'.join(usergotn)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergotn:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergot) > 0:\n\t\t\tfinalsearhdata+='Go Term:'+';'.join(usergot)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergot:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Term.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(userassayfdaapprovemark) > 
0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker::'+';'.join(userassayfdaapprovemark)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userassayfdaapprovemark:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(searchtermlist)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnameFIle=names.get_first_name()\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\t\t\t\t\t\n\t\t\t\t\tjfinaldata.append(jdic)\n\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata)\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1]))\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items())[:10])\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries']))\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', 
jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dumps(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tcontextindex={\n\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t}\n\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn render(request,'resultform.html',{'foundHits':foundHits})", "def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })", "def degruyterCheckSite(url):\n dgtestPhrase = 'Licensed Access'\n dgtestPhrase2 = 'viewbooktoc'\n\n # urltoCheck = input(\"\\n what is the URL? \\n\")\n\n urltoCheck = url\n\n r = requests.get(urltoCheck)\n rResult = r.text\n\n dgoutcome = 0\n if (dgtestPhrase in rResult) and (dgtestPhrase2 in rResult):\n dgoutcome = 1\n\n return dgoutcome", "def test_search_4(self):\n\n # search for \"cheese\"\n form = FrontSearchForm()\n form.search_box.set_value('cheese')\n form.submit.click()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)", "def post(request):\r\n\r\n if 'submitSearch' in request.POST:\r\n searchValue = request.POST['submitSearch']\r\n values = searchValue.replace(' ', '+')\r\n\r\n return HttpResponseRedirect('/feedreader/search/?keywords=' + values)\r\n\r\n\r\n return HttpResponseRedirect('/feedreader/manage')", "def test_view_url_exists_at_desired_location(self):\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)", "def query(url):", "def test_get_search_page_url(self):\n\n keywords = [\"design\", \"desk\"]\n search_page_base_url = self.test_data[\"search_page_base_url\"]\n\n fragment = quote(\" \".join(keywords))\n url = urljoin(search_page_base_url, fragment)\n\n self.assertEqual(self.retriever._get_search_page_url(keywords, 1), url, \\\n msg = \"The search page URL is malformed\" )\n\n next_page_query = \"?\" + urlencode({\"page\" : str(2)})\n url += next_page_query\n\n self.assertEqual(self.retriever._get_search_page_url(keywords, 2), url, \\\n msg = \"The search page URL is malformed\" )", "def test_search_720(self):\n self.driver.get(self.domain)\n self.assertTrue(u'XXXX' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n search.click()\n search_field = self.driver.find_element_by_css_selector(\"#XXXX\")\n search_field.send_keys(\"XXXX\")\n search_field.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" 
in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! => [%s] %s\" % (resp.status_code,\n current_url))", "def test_study_source_get_search_url_response(self):\n this_study = factories.StudyFactory.create()\n url = this_study.get_search_url()\n response = self.client.get(url)\n # url should work\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.context['form'], forms.SourceTraitSearchForm)", "def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()", "def search():\n pass" ]
[ "0.6459624", "0.62796915", "0.6123323", "0.6115619", "0.61025727", "0.6093292", "0.607711", "0.6075454", "0.60183024", "0.5996655", "0.59779406", "0.5963711", "0.58934385", "0.5891925", "0.5888652", "0.5886857", "0.5879968", "0.58128816", "0.5796384", "0.5794324", "0.5788601", "0.5771388", "0.57424057", "0.5738986", "0.573163", "0.5725577", "0.57224315", "0.57209176", "0.57098854", "0.57050055" ]
0.65516233
0
Check autocomplete companies url
def test_autocomplete_companies_urls(self): r = self.base_check_request("get", "autocomplete/companies/") self.assertIsInstance(r, list) self.assertEqual(len(r), 10, "Invalid default count") ac_keys = ['id', 'name', 'type_name'] for ac in r: # check response objects structure self.assertListEqual(sorted(list(ac.keys())), ac_keys) # check response types self.assertIsInstance(ac['name'], str) self.assertIsInstance(ac['type_name'], str) self.assertIsInstance(ac['id'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def API_company(request):\n query = request.GET\n if any(key for key in query if key not in API_COMPANY_VALIDKEYS):\n #print([(key,key not in API_COMPANY_VALIDKEYS) for key in query])\n return django.http.HttpResponseBadRequest(\"Invalid query\")\n if \"search\" in query:\n return API_companysearch(request)\n elif \"po\" in query:\n return API_companypo(request)\n return django.http.Http404()", "def test_autocomplete_locations_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/locations/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name',\n 'slug', 'text_for_apartments_search',\n 'text_for_complexes_search', 'type_name']\n # ac_keys_full = ac_keys + [\"metro_stations\"]\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n # self.check_list_item_keys(ac[\"ancestors\"], ac_keys_full)\n self.assertIsInstance(ac['id'], int)\n self.assertIsInstance(ac['is_region'], bool)\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['prepositional_name'], str)\n self.assertIsInstance(ac['slug'], str)\n self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None)))\n self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None)))\n self.assertIsInstance(ac['type_name'], str)", "def test_companies(self, setup_data):\n term = 'abc defg'\n\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': term,\n 'entity': 'company',\n },\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data['count'] == 2\n assert response.data['results'][0]['name'].startswith(term)\n assert [{'count': 2, 'entity': 'company'}] == response.data['aggregations']", "def check_url(url):\n return 'products.json' in url", "def test_companies_no_term(self, setup_data):\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(url, {})\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST", "def validate_url(self):\n pass", "def __url_filter(self, model, iter, user_data):\n\t\tpattern = dict_filter[self.combobox2.get_model()[self.combobox2.get_active()][0]]\n\t\treturn pattern in str(model.get_value(iter, 0))", "def test_all_companies(self, setup_data):\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': '',\n 'entity': 'company',\n },\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data['count'] > 0", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def _find_impl(url, query, count, auto_complete):\n try:\n res = requests.get(\n url,\n params={\"q\": query, \"count\": count, \"autoCorrect\": (\"true\" if auto_complete else \"false\")},\n )\n except (requests.ConnectionError, requests.ConnectTimeout):\n return \"`connection error`\"\n\n try:\n data = json.loads(res.content.decode(\"utf-8\"))\n except ValueError:\n return \"`no valid json`\"\n #print(data)\n\n if not data.get(\"value\"):\n return \"Nix\"\n\n return [v[\"url\"] for v in data[\"value\"]]", "def test_website_companies_get_details(self):\n pass", "def test_autocomplete_complexes_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['id', 'name', 
'type_name']\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['type_name'], str)\n self.assertIsInstance(ac['id'], int)", "def test_small_search_exists(self):\n\n search_html = 'agency search--box scrollable-dropdown-menu'\n\n response = self.client.get(reverse('learn'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('about'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('agencies'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developers'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developer'))\n self.assertContains(response, search_html)\n\n response = self.client.get(\n reverse(\n 'contact_landing',\n args=['department-of-commerce--census-bureau']))\n self.assertContains(response, search_html)", "def get_url(soup):\r\n \"\"\"criteria: any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')\"\"\"\r\n url_list = []\r\n for a in soup.find_all('a', {\"href\": re.compile(\"typeid=9$\")}):\r\n if any(s in a[\"title\"] for s in ('新增', '確診', '肺炎')):\r\n url = \"https://www.cdc.gov.tw\" + a['href']\r\n url_list.append(url)\r\n return url_list", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def url_combo_activated(self, valid):\r\n text = to_text_string(self.url_combo.currentText())\r\n self.go_to(self.text_to_url(text))", "async def getAutocompleteConfig(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getAutocompleteConfig()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/search/autocomplete/\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def url_construction(company):\n postcode = 
company[\"registered_address\"].strip()\n postcode = postcode.split(\" \")\n for i in range(len(postcode) - 1, 0, -1): # loop backwards in the obtained string\n if postcode[i].strip().isdigit(): # if the obtained string is fully a number\n postcode = postcode[i].strip()\n break\n\n keyword = company[\"name\"].strip().replace(\" \",\n \"%20\").strip() # gets the name and replaces empty spaces with \"%20\" in order to be used as a keyword in the url\n keyword = keyword.replace(\"&\",\n \"%26\").strip() # gets the name and replaces & symbols with \"%26\" in order to be used as a keyword in the url\n\n url = \"https://www.xing.com/search/companies?zip_code=\" + postcode + \"&keywords=\" + keyword # making the full url of the search operation\n return url", "def degruyterCheckSite(url):\n dgtestPhrase = 'Licensed Access'\n dgtestPhrase2 = 'viewbooktoc'\n\n # urltoCheck = input(\"\\n what is the URL? \\n\")\n\n urltoCheck = url\n\n r = requests.get(urltoCheck)\n rResult = r.text\n\n dgoutcome = 0\n if (dgtestPhrase in rResult) and (dgtestPhrase2 in rResult):\n dgoutcome = 1\n\n return dgoutcome", "def API_companysearch(request):\n company = request.GET.get(\"search\")\n company = str(company).strip()\n results = models.Company.objects.filter(name__icontains = company)\n results = [[company.pk,company.name] for company in results]\n return django.http.JsonResponse({\"success\":True,\"results\":results})", "def test_splits_urls_for_nouns(self):\r\n test_value = \"http://google.com/drives/autonomous/cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def clean_url(self):\n allowed_domains = (\"https://www.kurnik.pl\", \"https://www.playok.com\")\n url = self.cleaned_data[\"url\"]\n print(check_domain(url))\n if check_domain(url) in allowed_domains and url[-3:] == \"txt\":\n return url\n raise forms.ValidationError(\n \"Invalid url, only games from kurnik.pl\" \" or playok.com are allowed\"\n )", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def test_splits_url_parts(self):\r\n test_value = \"http://google.com/drives-autonomous_cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def search_autocomplete(request):\n response = HttpResponse(content_type='application/json')\n query = request.GET.get('query', None)\n if query:\n try:\n suggestions = []\n for node in nc.get_indexed_node(nc.graphdb.manager, 'name', query):\n suggestions.append(node['name'])\n d = {'query': query, 'suggestions': suggestions, 'data': []}\n json.dump(d, response)\n except Exception:\n pass\n return response\n return False", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n 
self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def test_pht_test_queries_without_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def test_pht_test_queries_without_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def get_suggestions(db_company):\n if db_company.archived:\n return []\n\n names = [\n db_company.name,\n *db_company.trading_names,\n ]\n\n data = [\n *itertools.chain(\n *[name.split(' ') for name in names],\n ),\n *names,\n ]\n\n return list(filter(None, set(data)))", "def __editAutoCompleteFromAPIs(self):\n self.activeWindow().autoCompleteFromAPIs()" ]
[ "0.6253605", "0.58379394", "0.57610047", "0.573596", "0.57312334", "0.5679476", "0.5679242", "0.56641346", "0.56231385", "0.56179494", "0.5549065", "0.5520094", "0.55199593", "0.55163765", "0.55016613", "0.54806185", "0.5457696", "0.5435207", "0.5428943", "0.54196036", "0.5412326", "0.5411279", "0.5355417", "0.53346395", "0.53115505", "0.5304847", "0.53005093", "0.53005093", "0.5281062", "0.5277312" ]
0.6856514
0
Check autocomplete complexes url
def test_autocomplete_complexes_urls(self): r = self.base_check_request("get", "autocomplete/complexes/") self.assertIsInstance(r, list) self.assertEqual(len(r), 10, "Invalid default count") ac_keys = ['id', 'name', 'type_name'] for ac in r: # check response objects structure self.assertListEqual(sorted(list(ac.keys())), ac_keys) # check response types self.assertIsInstance(ac['name'], str) self.assertIsInstance(ac['type_name'], str) self.assertIsInstance(ac['id'], int)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_url(self):\n pass", "def check_url(url):\n return 'products.json' in url", "def test_autocomplete_locations_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/locations/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name',\n 'slug', 'text_for_apartments_search',\n 'text_for_complexes_search', 'type_name']\n # ac_keys_full = ac_keys + [\"metro_stations\"]\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n # self.check_list_item_keys(ac[\"ancestors\"], ac_keys_full)\n self.assertIsInstance(ac['id'], int)\n self.assertIsInstance(ac['is_region'], bool)\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['prepositional_name'], str)\n self.assertIsInstance(ac['slug'], str)\n self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None)))\n self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None)))\n self.assertIsInstance(ac['type_name'], str)", "def __url_filter(self, model, iter, user_data):\n\t\tpattern = dict_filter[self.combobox2.get_model()[self.combobox2.get_active()][0]]\n\t\treturn pattern in str(model.get_value(iter, 0))", "def test_splits_url_parts(self):\r\n test_value = \"http://google.com/drives-autonomous_cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def _find_impl(url, query, count, auto_complete):\n try:\n res = requests.get(\n url,\n params={\"q\": query, \"count\": count, \"autoCorrect\": (\"true\" if auto_complete else \"false\")},\n )\n except (requests.ConnectionError, requests.ConnectTimeout):\n return \"`connection error`\"\n\n try:\n data = json.loads(res.content.decode(\"utf-8\"))\n except ValueError:\n return \"`no valid json`\"\n #print(data)\n\n if not data.get(\"value\"):\n return \"Nix\"\n\n return [v[\"url\"] for v in data[\"value\"]]", "def match_url(self, url):\n pass", "def test_autocomplete_companies_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['id', 'name', 'type_name']\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), ac_keys)\n\n # check response types\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['type_name'], str)\n self.assertIsInstance(ac['id'], int)", "def test_splits_urls_for_nouns(self):\r\n test_value = \"http://google.com/drives/autonomous/cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def search_autocomplete(request):\n response = HttpResponse(content_type='application/json')\n query = request.GET.get('query', None)\n if query:\n try:\n suggestions = []\n for node in nc.get_indexed_node(nc.graphdb.manager, 'name', query):\n suggestions.append(node['name'])\n d = {'query': query, 'suggestions': suggestions, 'data': []}\n json.dump(d, response)\n except Exception:\n pass\n return response\n return False", "def url_shortner(self):", "def query(url):", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def test_search_form_complexes_urls(self):\n r_keys = ['balcony_types', 'bathroom_type', 'building_floors_max',\n 'building_floors_min', 'building_type', 'decoration',\n 'elevators_type', 'floor_max', 
'floor_min', 'infrastructure',\n 'living_area_max', 'living_area_min', 'metro_stations',\n 'price_per_m_max', 'price_per_m_min', 'regions', 'rooms_count',\n 'term_gc_max', 'term_gc_min', 'total_area_max', 'total_area_min']\n r = self.check_request_keys(\"get\", \"search-forms/complexes/\", r_keys)\n\n self.check_list_item_keys(r[\"balcony_types\"], ['id', 'name'])\n self.check_list_item_keys(r[\"bathroom_type\"], ['id', 'name'])\n self.assertIsInstance(r['building_floors_max'], int)\n self.assertIsInstance(r['building_floors_min'], int)\n self.check_list_item_keys(r[\"building_type\"], ['id', 'name'])\n self.assertIsInstance(r['decoration'], list)\n self.assertEqual(r['decoration'], [])\n self.check_list_item_keys(r[\"elevators_type\"], ['id', 'name'])\n self.assertIsInstance(r['floor_max'], int)\n self.assertIsInstance(r['floor_min'], int)\n self.assertIsInstance(r['infrastructure'], list)\n self.assertEqual(r['infrastructure'], [])\n self.assertIsInstance(r['living_area_max'], int)\n self.assertIsInstance(r['living_area_min'], int)\n self.check_list_item_keys(r[\"metro_stations\"], ['id', 'name'])\n self.assertIsInstance(r['price_per_m_max'], int)\n self.assertIsInstance(r['price_per_m_min'], int)\n self.check_list_item_keys(r[\"regions\"],\n ['format', 'id', 'locations', 'name', 'slug', 'typeBeforeLocation',\n 'typeName', 'typePrepositionalShortName', 'typeShortName'])\n self.check_list_items_type(r['rooms_count'], int)\n self.assertIsInstance(r['term_gc_max'], int)\n self.assertIsInstance(r['term_gc_min'], int)", "def canHandleUrl(cls, url):\n return url.startswith(\"https://cc0textures.com/view.php?tex=\")", "def clean_url(self):\n url = self.cleaned_data['url']\n\n if url:\n pattern = re.compile(r'https?://(www\\.)?instagr(\\.am|am\\.com)/p/\\S+')\n if not pattern.match(url):\n raise forms.ValidationError('Please provide a valid instagram link.')\n\n return url", "def validate_long_url(form, field):\n for regex in LinkForm.rejected_regexes:\n if regex.search(field.data):\n raise ValidationError(\"That URL is not allowed.\")", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('http')", "def validaURL(url: AnyStr) -> bool:\n\n return 
re.compile(patternURL).search(url) != None # Linea 1", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def isValidURL(self, url):\n if \"imdb.com\" in url:\n return True\n else:\n return False", "def test_pht_test_queries_with_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': 'pht' + query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def test_pht_test_queries_with_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': 'pht' + query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def test_pht_test_queries_without_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def test_pht_test_queries_without_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def test_autocomplete_recipe_search(self):\n pass", "def testLongURL(self):\n self.assertEqual([], grab('www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www.www', self.needScheme))" ]
[ "0.64022815", "0.6359344", "0.6158054", "0.6140897", "0.59668964", "0.5876774", "0.5855907", "0.5841358", "0.5820167", "0.57811606", "0.5778149", "0.5756644", "0.57285386", "0.56883585", "0.5687888", "0.5670359", "0.5656254", "0.5651676", "0.5642779", "0.5618405", "0.5603773", "0.560093", "0.5589333", "0.5587002", "0.55029595", "0.55029595", "0.5502057", "0.5502057", "0.5485521", "0.5480313" ]
0.64943016
0
Check autocomplete locations url
def test_autocomplete_locations_urls(self): r = self.base_check_request("get", "autocomplete/locations/") self.assertIsInstance(r, list) self.assertEqual(len(r), 10, "Invalid default count") ac_keys = ['ancestors', 'id', 'is_region', 'name', 'prepositional_name', 'slug', 'text_for_apartments_search', 'text_for_complexes_search', 'type_name'] # ac_keys_full = ac_keys + ["metro_stations"] for ac in r: # check response objects structure self.assertListEqual(sorted(list(ac.keys())), ac_keys) # check response types # self.check_list_item_keys(ac["ancestors"], ac_keys_full) self.assertIsInstance(ac['id'], int) self.assertIsInstance(ac['is_region'], bool) self.assertIsInstance(ac['name'], str) self.assertIsInstance(ac['prepositional_name'], str) self.assertIsInstance(ac['slug'], str) self.assertIsInstance(ac['text_for_apartments_search'], (str, type(None))) self.assertIsInstance(ac['text_for_complexes_search'], (str, type(None))) self.assertIsInstance(ac['type_name'], str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_url(self):\n pass", "def test_locationSearch(self):\n sel = self.selenium\n \n # Login\n self.login()\n\n # L2inL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L2inL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L2inL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL0\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # @ToDo: Verify that the result is stored correctly\n # How do we get name from number without submitting? 
SHould we just submit every time?\n \n\n # L2inL1withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L2inL1withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L2inL1withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL1withNoParent\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L3inL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L3inL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L3inL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child 
a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"L3inL0\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L3inL1withL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L3inL1withL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L3inL1withL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n 
self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Ouest\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"L3inL1withL0\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L3inL1withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L3inL1withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L3inL1withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"L3inL1withNoParent\", sel.get_selected_label(\"gis_location_L3\"))\n 
self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL0\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check 
that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL1withL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL1withL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL1withL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Ouest\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL1withL0\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n 
self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL1withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL1withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL1withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL1withNoParent\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL2withL1L0 \n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that 
button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withL1L0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withL1L0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Ouest\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Port-Au-Prince\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withL1L0\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L4inL2withL1only\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withL1only\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withL1only\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # 
Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL1withNoParent\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withL1only\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L4inL2withL0only\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withL0only\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withL0only\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n 
self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL0\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withL0only\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L4inL2withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2withNoParent\", sel.get_selected_label(\"gis_location_L2\"))\n 
self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withNoParent\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))", "def test_view_url_exists_at_desired_location(self):\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)", "def check_url(url):\n return 'products.json' in url", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def browse_location(self, level=0, URL_location=None):", "def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False", "def valid_url(self):\r\n if self.resolver:\r\n return True\r\n return False", "def search_autocomplete(request):\n response = HttpResponse(content_type='application/json')\n query = request.GET.get('query', None)\n if query:\n try:\n suggestions = []\n for node in nc.get_indexed_node(nc.graphdb.manager, 'name', query):\n suggestions.append(node['name'])\n d = {'query': query, 'suggestions': suggestions, 'data': []}\n json.dump(d, response)\n except Exception:\n pass\n return response\n return False", "def _discoverLocation(self, uri):", "def test_autocomplete_companies_urls(self):\n r = self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 10, \"Invalid default count\")\n\n ac_keys = ['id', 'name', 'type_name']\n for ac in r:\n # check response objects structure\n self.assertListEqual(sorted(list(ac.keys())), 
ac_keys)\n\n # check response types\n self.assertIsInstance(ac['name'], str)\n self.assertIsInstance(ac['type_name'], str)\n self.assertIsInstance(ac['id'], int)", "def offer(self, url):\n parts = urlparse(url)\n return bool(self.KT_RE.match(parts.netloc))", "def test_locations(self):\n url = reverse(\"locations\", args=[00000])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data, list))\n self.assertTrue(response.data) # list not empty\n\n location_data = response.data[0]\n data_keys = [\n \"title\",\n \"address\",\n \"address2\",\n \"city\",\n \"state\",\n \"postalCode\",\n \"distance\",\n \"hours\",\n \"phone\",\n \"geocode\",\n ]\n self.assertEqual(list(location_data.keys()), data_keys)", "def test_urls(self):\n self.base_check_request(\"get\", \"/\")\n self.base_check_request(\"get\", \"apartments/\")\n self.base_check_request(\"get\", \"complexes/\")\n self.base_check_request(\"get\", \"locations/\")\n self.base_check_request(\"get\", \"companies/\")\n self.base_check_request(\"get\", \"companies-types/\")\n\n self.base_check_request(\"get\", \"count/apartments/\")\n self.base_check_request(\"get\", \"count/complexes/\")\n\n self.base_check_request(\"get\", \"search-forms/apartments/\")\n self.base_check_request(\"get\", \"search-forms/complexes/\")\n self.base_check_request(\"get\", \"search-forms/main/\")\n\n self.base_check_request(\"get\", \"autocomplete/companies/\")\n self.base_check_request(\"get\", \"autocomplete/complexes/\")\n self.base_check_request(\"get\", \"autocomplete/locations/\")\n\n self.base_check_request(\"get\", \"apartments_for_maps/?count=1&fields=lat,lon\")\n # self.base_check_request(\"get\", \"reserve/\")\n # self.base_check_request(\"get\", \"complain/\")\n # self.base_check_request(\"post\", \"apartment-complain/\")\n # self.base_check_request(\"post\", \"order-apartment/\")", "def handle_suggest():\n return 0", "def get_all_locations(self):", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def _find_impl(url, query, count, auto_complete):\n try:\n res = requests.get(\n url,\n params={\"q\": query, \"count\": count, \"autoCorrect\": (\"true\" if auto_complete else \"false\")},\n )\n except (requests.ConnectionError, requests.ConnectTimeout):\n return \"`connection error`\"\n\n try:\n data = json.loads(res.content.decode(\"utf-8\"))\n except ValueError:\n return \"`no valid json`\"\n #print(data)\n\n if not data.get(\"value\"):\n return \"Nix\"\n\n return [v[\"url\"] for v in data[\"value\"]]", "def is_served_area(self, location):\n\t\tis_served = False\n\t\tcode = 500\n\n\t\turl = self.base_url\n\n\t\thtml, code = self.crawler.get(url)\n\n\t\tif code == 200:\n\t\t\t# Getting form data \n\t\t\tself.parser.set_html(html)\n\t\t\tform_data = self.parser.get_postal_code_form_data()\n\t\t\tdata = form_data['data']\n\t\t\turl = self.properurl(form_data['url'])\n\n\t\t\tdata['enteredZipCode'] = location['postal_code']\n\n\t\t\thtml, code = self.crawler.post(url, data)\n\t\t\tself.parser.set_html(html)\n\t\t\tdata_delivery = self.parser.get_form_delivery_zone()\n\n\t\t\tif data_delivery['type'] == 'address':\n\t\t\t\thtml, code = self.crawler.search_adress('%s, %s 
%s'%(location['address'].encode('utf8', 'replace'),location['postal_code'].encode('utf8', 'replace'), location['city_name'].encode('utf8', 'replace')))\n\t\t\t\tsuggetions = self.parser.extract_suggested_addresses(html)\n\t\t\t\t[s.update({'url': self.properurl(s['url'])} )for s in suggetions]\n\n\t\t\t\tif len(suggetions) > 0:\n\t\t\t\t\t# There is at least one suggestion, select the first\n\t\t\t\t\taddress = suggetions[0]\n\n\t\t\t\t\t# Now set this address\n\t\t\t\t\thtml, code = self.crawler.set_address(address)\n\t\t\t\t\tself.parser.set_html(html)\n\t\t\t\t\tform_data = self.parser.get_form_delivery_zone()\n\t\t\t\t\tform_data['form']['url'] = self.properurl(form_data['form']['url'])\n\t\t\t\t\thtml, code = self.crawler.set_delivery(form_data)\n\t\t\t\t\tif code == 200:\n\t\t\t\t\t\tis_served = True\n\n\t\t\telif data_delivery['type'] == 'select':\n\t\t\t\tdata_delivery['form']['url'] = self.properurl(data_delivery['form']['url'])\n\t\t\t\tif 'radiogroup' in data_delivery['form']['data'] and 'LAD' in data_delivery['form']['data']['radiogroup']:\n\t\t\t\t\thtml, code = self.crawler.set_delivery(data_delivery)\n\t\t\t\t\tif code == 200:\n\t\t\t\t\t\tis_served = True\n\t\t\t\telse:\n\t\t\t\t\tis_served = False\n\n\t\telse:\n\t\t\tprint 'Error while fetching base url of Monoprix (code = %d)'%(code)\n\n\t\treturn is_served, code", "def test_splits_url_parts(self):\r\n test_value = \"http://google.com/drives-autonomous_cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def test_homepage_with_location(self):\r\n\r\n with self.client:\r\n response = self.client.get('/?location=US-CA')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'California News', response.data)\r\n\r\n response = self.client.get('/?location=US-FAKE')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'No data found for that region.', response.data)", "def test_splits_urls_for_nouns(self):\r\n test_value = \"http://google.com/drives/autonomous/cars\"\r\n self.assertEqual(\r\n set([u'cars', u'autonomous']),\r\n suggest_tags(test_value))", "def autocomplete():\n query = '' if request.args.get('query') is None else request.args.get('query')\n\n prefixed_words = []\n close_words = []\n for f in app.preprocessed.words:\n lowered = f.lower()\n if lowered.startswith(query) and lowered != query:\n prefixed_words.append(f)\n elif levenshtein(query, lowered) <= 1:\n close_words.append(f)\n\n result = {\n 'success': True,\n 'data': {\n 'suggestions': prefixed_words + close_words\n }\n }\n return jsonify(result)", "def test_address_correct(self):\n tester = app.test_client(self)\n response = tester.post(\"/result\",\n data = dict(location=\"Chennai\"),\n follow_redirects=True)\n self.assertIn(b\"Chennai, Chennai District, Tamil Nadu, 600001, India\", response.data)", "def test_pht_test_queries_without_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def 
test_pht_test_queries_without_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))", "def test_view_url_exists_at_desired_location(self):\n response = self.client.get('')\n self.assertEqual(response.status_code, 200)", "def test_search_gateway_language(self):\n term = \"gl\"\n expected_results = [dict(pk=self.gl.pk, lc=self.gl.lc, ln=self.gl.ln, lr=self.gl.lr, ang=self.gl.ang)]\n request = RequestFactory().get(\"/ac/gateway-langnames/?q=\" + term)\n response = gateway_languages_autocomplete(request)\n data = json.loads(response.content)\n self.assertListEqual(data.get(\"results\"), expected_results)\n self.assertEqual(data.get(\"term\"), term)\n self.assertEqual(data.get(\"count\"), 1)", "def test_pht_test_queries_with_pht_in_string(self):\n url = self.get_url()\n for query in self.TEST_PHT_QUERIES:\n response = self.client.get(url, {'q': 'pht' + query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_PHT_QUERIES[query]\n # Make sure number of matches is as expected.\n self.assertEqual(len(returned_pks), len(expected_matches))\n # Make sure the matches that are found are the ones expected.\n for expected_pht in expected_matches:\n expected_pk = models.SourceDataset.objects.get(i_accession=expected_pht).pk\n self.assertIn(expected_pk, returned_pks,\n msg=\"Could not find expected pht {} with query '{}'\".format(expected_pht, query))" ]
[ "0.5915492", "0.58113354", "0.57620513", "0.5706289", "0.5672959", "0.5648629", "0.557731", "0.5552593", "0.5551483", "0.5526195", "0.5514765", "0.55085653", "0.54948395", "0.5471785", "0.54306555", "0.542994", "0.54257005", "0.542408", "0.54149646", "0.54077256", "0.53922635", "0.5387848", "0.5383354", "0.53456324", "0.5344899", "0.5321734", "0.5321734", "0.53150237", "0.5277967", "0.52506703" ]
0.72685176
0
Patch out render_template with a mock. Use when the return value of the view is not important to the test; rendering templates uses a ton of runtime.
def patch_render_template(self): mock_render = Mock(spec=render_template) mock_render.return_value = '' with patch('app.main.render_template', mock_render): yield mock_render
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_render_call_without_request(self, mock_render):\n context = MagicMock()\n render('template_name.html', context)\n mock_render.assert_called_with('template_name.html', context)", "def render_to_template_mock(*_args):", "def test_render_call_with_request(self, mock_render):\n mock_rc = MagicMock()\n with patch('helio.heliodjango.renderers.get_request_context', return_value=mock_rc) as mock_rc_init:\n context = MagicMock()\n request = MagicMock()\n render('template_name.html', context, request)\n mock_rc_init.assert_called_with(request)\n mock_render.assert_called_with('template_name.html', context, context_instance=mock_rc)", "def mock_render_to_response(template_name, context):\r\n # View confirm_email_change uses @transaction.commit_manually.\r\n # This simulates any db access in the templates.\r\n UserProfile.objects.exists()\r\n return HttpResponse(mock_render_to_string(template_name, context))", "def test_render_cached(self, mock_from_string, mock_sha1):\n template = SnippetTemplateFactory(code='asdf')\n cache_key = mock_sha1.return_value.hexdigest.return_value\n jinja_template = Mock()\n mock_cache = {cache_key: jinja_template}\n\n with patch('snippets.base.models.template_cache', mock_cache):\n result = template.render({})\n\n mock_sha1.assert_called_with('asdf')\n ok_(not mock_from_string.called)\n jinja_template.render.assert_called_with({'snippet_id': 0})\n eq_(result, jinja_template.render.return_value)", "def mock_render_to_string(template_name, context):\r\n return str((template_name, context))", "def test_render_not_cached(self, mock_from_string, mock_sha1):\n template = SnippetTemplateFactory(code='asdf')\n mock_cache = {}\n\n with patch('snippets.base.models.template_cache', mock_cache):\n result = template.render({})\n\n jinja_template = mock_from_string.return_value\n cache_key = mock_sha1.return_value.hexdigest.return_value\n eq_(mock_cache, {cache_key: jinja_template})\n\n mock_sha1.assert_called_with('asdf')\n mock_from_string.assert_called_with('asdf')\n jinja_template.render.assert_called_with({'snippet_id': 0})\n eq_(result, jinja_template.render.return_value)", "def mock_template(monkeypatch):\n monkeypatch.setattr(\"libgitlab.templating.host.os.fchown\", mock.Mock())\n monkeypatch.setattr(\"libgitlab.templating.host.os.chown\", mock.Mock())\n monkeypatch.setattr(\"libgitlab.templating.host.os.fchmod\", mock.Mock())", "def rendered(template):\r\n def was_rendered(client, response, testcase):\r\n testcase.assertTemplateUsed(response, template)\r\n return was_rendered", "def _render(template, context, app):\n context.update(app.jinja_env.globals)\n app.update_template_context(context)\n try:\n rv = template.render(**context)\n template_rendered.send(app, template=template, context=context)\n return rv\n except:\n translate = app.config.get(\"MAKO_TRANSLATE_EXCEPTIONS\")\n if translate:\n translated = TemplateError(template)\n raise translated\n else:\n raise", "def test_template_lookup_result_returned(self, template_override_mock):\n mock_template = mock.Mock()\n mock_template.name = 'site-1/base_site.html'\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = 'site-1'\n template_override_mock.return_value = mock_template\n context = context_processors.decide_base_template(request)\n self.assertEqual(\n context['base_template'], 'site-1/base_site.html'\n )", "def mock_render_to_string(template_name, context):\r\n return str((template_name, sorted(context.iteritems())))", "def test_render():\n from coffin.shortcuts import render\n 
response = render(None, 'render-x.html', {'x': 'foo'})\n assert response.content == 'foo'", "def test_website_not_set_if_not_match(self, template_override_mock):\n request = mock.Mock()\n request.resolver_match.kwargs.get.side_effect = Exception('something')\n request.path = '/'\n context_processors.decide_base_template(request)\n template_override_mock.assert_not_called()", "def render_to_response(template, context, request, *args, **kwargs):\n from django.shortcuts import render_to_response as rtr\n from django.template import RequestContext\n return rtr(template, context, context_instance=RequestContext(request), *args, **kwargs)", "def render_to(template=None, mimetype=\"text/html\"):\n def renderer(function):\n @wraps(function)\n def wrapper(request, *args, **kwargs):\n output = function(request, *args, **kwargs)\n if not isinstance(output, dict):\n return output\n tmpl = output.pop('TEMPLATE', template)\n return render_to_response(tmpl, output, \\\n context_instance=RequestContext(request), mimetype=mimetype)\n return wrapper\n return renderer", "def instrumented_test_render(self, context):\n signals.template_rendered.send(sender=self, template=self, context=context)\n return self.nodelist.render(context)", "def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/journals_tab_block.html', 'Templates were not the same')", "def test_render_template(self):\n template = self.block.meta.template\n self.assertEqual(template, 'common/blocks/google_calendar.html', 'The templates are not the same')", "def RenderResponse(self, template, **context):\n jinja2_renderer = jinja2.get_jinja2(app=self.app)\n rendered_value = jinja2_renderer.render_template(template, **context)\n self.response.write(rendered_value)", "def test_view_uses_correct_template(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home.html')", "def test_get_request_invoke_render_with_form_object(self):\n\n mock_render = patch(\n 'identity.views.DeleteProjectView.get_context_data').start()\n\n fake_resource = FakeResource()\n self.form.return_value = fake_resource\n _ = self.view(self.request)\n\n computed_form = mock_render.call_args[1].get('form')\n self.assertEqual(fake_resource, computed_form)", "def render_to(template_path):\n\n def decorator(func):\n def wrapper(request, *args, **kwargs):\n output = func(request, *args, **kwargs)\n if not isinstance(output, dict):\n return output\n ctx = RequestContext(request)\n return render_to_response(template_path, output,\n context_instance=ctx)\n return wrapper\n return decorator", "def test_do_render():\n bundle = MagicMock()\n resource = MagicMock()\n resource.to_yaml.return_value = \"\"\n bundle.resources.matching.return_value = [resource, resource]\n action = interface.CommandAction(MagicMock(), [], bundle)\n interface.do_render(action)", "def render_to(template_name):\n def renderer(func):\n def wrapper(request, *args, **kw):\n output = func(request, *args, **kw)\n if not isinstance(output, dict):\n return output\n return render_to_response(template_name, output,\n context_instance=RequestContext(request))\n return wrapper\n return renderer", "def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')", "def render_response(template, *args, **kwargs):\n\treturn render_template(template, *args, user=current_user(), **kwargs)", "def test_template(project):\n project.add_mock_file(\"templates\", 
\"test.tmpl\", \"{{ value }}\")\n project.compile(\"\"\"import unittest\nvalue = \"1234\"\nstd::print(std::template(\"unittest/test.tmpl\"))\n \"\"\")\n\n assert project.get_stdout() == \"1234\\n\"", "def render_template(template, **template_variables):\n return render_to_response(template, template_variables)", "def testing():\n return render_template(\"testing.html\")" ]
[ "0.79456276", "0.7762128", "0.75975364", "0.6913007", "0.6731079", "0.6667107", "0.66639966", "0.6500641", "0.62945276", "0.62749934", "0.62602276", "0.6258427", "0.62050587", "0.6040006", "0.5983413", "0.5940503", "0.5893055", "0.58894944", "0.5863483", "0.585568", "0.5855136", "0.5823781", "0.58028865", "0.58014184", "0.5752624", "0.57449454", "0.5743936", "0.57241106", "0.57163566", "0.5710367" ]
0.87866557
0
Assert that the following code creates a Flask flash message. The message must contain the given snippet to pass.
def assert_flashes(self, snippet, message=None): if message is None: message = "'{}' not found in any flash message".format(snippet) mock_flash = Mock(spec=flash) with patch('app.main.flash', mock_flash): yield mock_flash for call_args in mock_flash.call_args_list: args, _ = call_args if snippet.lower() in args[0].lower(): return self.fail(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_messages(client, test_db):\n login(client, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n rv = client.post(\n \"/add\",\n data=dict(title=\"<Hello>\", text=\"<strong>HTML</strong> allowed here\"),\n follow_redirects=True,\n )\n assert b\"No entries here so far\" not in rv.data\n assert b\"&lt;Hello&gt;\" in rv.data\n assert b\"<strong>HTML</strong> allowed here\" in rv.data", "def ttest_messages(self):\n self.login(\n app.config['USERNAME'],\n app.config['PASSWORD']\n )\n rv = self.app.post('/add', data=dict(\n title='<Hello>',\n text='<strong>HTML</strong> allowed here'\n ), follow_redirects=True)\n assert b'No entries here so far' not in rv.data\n assert b'&lt;Hello&gt;' in rv.data\n assert b'<strong>HTML</strong> allowed here' in rv.data", "def step_impl(context, message):\n assert message in context.driver.title", "def step_impl(context, message):\n expect(context.driver.title).to_contain(message)", "def flash_message(self, request):\n return FlashMessagesElement()", "def flash_message(self, request):\n return FlashMessagesElement()", "def test_bleach_body(self):\n self.test_resource.description = \"<script>alert('hi!');</script>\"\n self.test_resource.full_clean()\n self.test_resource.save()\n self.assertEqual(self.test_resource.description, \"&lt;script&gt;alert('hi!');&lt;/script&gt;\")", "def test_shred_success_alert():\n app = HelperApp(server.message_app)\n app.post('/login/', {'username': 'jessie', 'password': 'frog'})\n\n # Add a an message\n app.post('/compose/', {'to': 'james', 'subject': 's', 'body': 'b'})\n app.get('/') # Clears alerts\n\n # Remove something bogus\n app.post('/shred/')\n\n # Make sure we warn the user about it\n alerts = unpack_alerts(app.cookies)\n assert len(alerts) == 1\n assert alerts == [{'kind': 'success',\n 'message': 'Shreded all messages.'}]", "def test_now_lifecycle(self):\n self.response = self.client.get(reverse(views.set_now_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def test_default_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.render_template))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def test_construct_with_body():\n control_data = get_control_data('body')\n payload = messages.construct_payload('Welcome', node_id='my-board', body='<h1>Hello!</h1>')\n assert payload == control_data\n return", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def flash(text, type=INFO):\n flash_message['text'] = text\n flash_message['type'] = type", "def check_alert(step, text):\r\n\r\n try:\r\n alert = Alert(world.browser)\r\n assert_equals(alert.text, text)\r\n except WebDriverException:\r\n # PhantomJS is kinda poor\r\n pass", "def test_replace_flash_scope(self):\n request = lambda: self.client.get(reverse(views.replace_flash))\n self.assertRaises(TypeError, request)", "def _error_embed_helper(title: str, description: 
str) -> discord.Embed:\n return discord.Embed(title=title, description=description, colour=discord.Colour.red())", "def verify_feedback_message(self):\n feedback_message = self.wait_until_find(locator_type=By.XPATH, locator=start_page.FEEDBACK_MESSAGE_XPATH).text\n assert feedback_message == start_page.FEEDBACK_MESSAGE_TEXT, f\"Actual message: {feedback_message}\"\n self.logger.debug(\"Feedback message was verified\")", "def assertTrue(self, statement, message):\n prefix = \"In component %s: \" % self.name\n if not statement:\n error(prefix + str(message))", "def test_discard_lifecycle(self):\n self.response = self.client.get(reverse(views.discard_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def test_multiple_variables_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.set_another_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n self.assertEqual('Another message', self._flash()['anotherMessage'])\n\n # 'message' will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())\n self.assertEqual('Another message', self._flash()['anotherMessage'])\n\n # 'anotherMessage' will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())\n self.assertFalse('anotherMessage' in self._flash())", "def test_keep_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.keep_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value won't be removed now because it was explicitely kept\n self.response = self.client.get(reverse(views.render_template))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def test_message_as_student(self, do_student_launch):\n\n response = do_student_launch()\n\n assert_launched_as_student(response)", "def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out", "def test_snippet_beginning_nonletter(self):\n message = Message(clean_text=u\"!I already know what this will be!!!!!\")\n self.assertEqual(\n message.snippet,\n 'I already know what...'\n )", "def error_embed(message: str, title: Optional[str] = None) -> Embed:\n title = title or random.choice(ERROR_REPLIES)\n embed = Embed(colour=Colours.soft_red, title=title)\n embed.description = message\n return embed", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def assertion_callback(_request, _uri, headers):\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert 
b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')", "def test_flash_raises(capsys):\n with mock.patch('uflash.flash', side_effect=RuntimeError(\"boom\")):\n with pytest.raises(SystemExit):\n uflash.main(argv=['test.py'])\n\n _, stderr = capsys.readouterr()\n expected = 'Error flashing test.py'\n assert expected in stderr", "def test_lint_pass(self, style):\n result = run_linter_throw(\"path/to/file\",\n \"{s} /path/to/file\\n{m}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])\n self.assertTrue(result)", "async def Codeblock(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"```\"+ message + \"```\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)" ]
[ "0.5874714", "0.5859291", "0.5777571", "0.56764233", "0.5541651", "0.5541651", "0.5482096", "0.54655033", "0.54281986", "0.5292355", "0.525658", "0.522889", "0.5216381", "0.5192509", "0.5183022", "0.5176257", "0.5148801", "0.5143383", "0.5133528", "0.5115431", "0.50823", "0.5051929", "0.50106084", "0.5005722", "0.49960816", "0.4978217", "0.49630058", "0.49234518", "0.49178445", "0.49173015" ]
0.77274686
0
Generates the logger and the doing() context manager for a given name. The doing() context manager will display a message in the logs and handle exceptions occurring during execution, displaying them in the logs as well. If an exception occurs, the program is exited.
def make_doer(name): logger = getLogger(name) @contextmanager def doing(message): logger.info(message) # noinspection PyBroadException try: yield except LuhError as e: logger.error(e.message) exit(1) except Exception: logger.exception("Unknown error") exit(2) doing.logger = logger return doing
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n flags = parser_create()\n config_data = config_loader_yaml(flags.config_file)\n loggers_config = get_loggers_config(config_data)\n logging_queue = multiprocessing.Queue()\n logging_worker = LoggingWorker(loggers_config, logging_queue)\n logging_worker.start()\n\n class_name = \"\"\n function_name = inspect.stack()[0][3]\n\n for i in range(5):\n log_message(logging_queue, 'DEBUG', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'INFO', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'WARNING', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'ERROR', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'CRITICAL', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'Unknown', __name__, class_name, function_name, 'Message ' + str(i))\n\n logging_queue.put(None)\n logging_worker.join()", "def main():\n logger = setup_logger()\n\n logger.debug('a debug message')\n logger.info('an info message')\n logger.warning('a warning message')\n logger.error('an error message')\n logger.critical('a critical message')", "def logger_executor(self):\n logger = logging.getLogger(self.logger_name)\n logger.setLevel(level=self.logger_level)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s\")\n handler = RotatingFileHandler(self.logger_file, maxBytes=10*1024*1024, backupCount=5)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def make_logger(name=str(os.getpid())):\n if not sys.platform.startswith(\"win\") and sys.stderr.isatty():\n def add_color_emit_ansi(fn):\n \"\"\"Add methods we need to the class.\"\"\"\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new\n log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)\n log_file = os.path.join(gettempdir(), str(name).lower().strip() + \".log\")\n log.basicConfig(level=-1, filemode=\"w\", filename=log_file)\n log.getLogger().addHandler(log.StreamHandler(sys.stderr))\n adrs = \"/dev/log\" if sys.platform.startswith(\"lin\") else \"/var/run/syslog\"\n try:\n handler = log.handlers.SysLogHandler(address=adrs)\n except:\n log.debug(\"Unix SysLog Server not found, ignored Logging to SysLog.\")\n else:\n log.getLogger().addHandler(handler)\n log.debug(\"Logger created with Log file at: {0}.\".format(log_file))\n return log", "async def logger(self, ctx):\n await util.command_group_help(ctx)", "def create_logger(app_name: str) -> logging.Logger:\n if not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir(os.path.join(os.getcwd(), 'logs'))\n\n app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')\n\n logger = logging.getLogger(f\"{app_name}-logger\")\n 
logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)\n handler.setLevel(logging.DEBUG)\n\n # Set the formatter\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n # Set it as the base handler\n logger.base_handler = handler\n\n # Also add a newline handler to switch to later\n newline_handler = logging.FileHandler(filename=app_logfile, mode='a')\n newline_handler.setLevel(logging.DEBUG)\n newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format\n \n logger.newline_handler = newline_handler\n\n # Also add the provision for a newline handler using a custom method attribute\n logger.newline = types.MethodType(add_newlines, logger)\n\n # Also add a StreamHandler for printing to stderr\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n \n logger.addHandler(console_handler)\n\n return logger", "def get_logger(log_dir, name):\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n # Create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Log everything (i.e., DEBUG level and above) to a file\n log_path = os.path.join(log_dir, f'{name}.txt')\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # Log everything except DEBUG level (i.e., INFO level and above) to console\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # Create format for the logs\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n console_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n console_handler.setFormatter(console_formatter)\n\n # add the handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger", "def ContextLog(logger, oline, cline):\n logger('{}...'.format(oline))\n yield\n logger('{}.'.format(cline))", "def run(self):\r\n self.log(texto=f\"Executando {self._name}\")", "def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter = time_formatter\n\n if rich_logging:\n from 
rich.logging import RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger", "def construct_logger(name, save_dir):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n file_no_ext = out_file_core()\n\n fh = logging.FileHandler(os.path.join(save_dir, file_no_ext + \".txt\"), encoding=\"utf-8\")\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(name)s %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n gitdiff_patch = os.path.join(save_dir, file_no_ext + \".gitdiff.patch\")\n os.system(f\"git diff HEAD > {gitdiff_patch}\")\n\n return logger", "def whLogger(name):\n return logging.getLogger('wh.'+name)", "def named(name):\n\n def new_annotate(mware):\n def new_middleware(handler):\n\n new_handler = mware(handler)\n\n def verbose_handler(ctx):\n _print_inwards(name)\n\n new_ctx = new_handler(ctx)\n\n _print_outwards(name)\n\n return new_ctx\n\n return verbose_handler\n\n return new_middleware\n\n return new_annotate", "def logger(name=None):\r\n\r\n log = logging.getLogger(name or 'logging')\r\n if HANDLER and HANDLER not in log.handlers:\r\n log.addHandler(HANDLER)\r\n\r\n return log", "def setup_logbook(name, extension='.txt', level=logging.INFO, soloDir = True):\n formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d (%(name)s) - %(message)s', datefmt='%d-%m-%y %H:%M:%S')\n date = datetime.today().strftime('%Y-%m-%d')\n if soloDir:\n log_path = str(settings.DATA_DIR + name + '/' + name.replace('_', '') +'_' + date + extension)\n else:\n log_path = str(settings.DATA_DIR + name +'_' + date + extension)\n handler = RotatingFileHandler(log_path, maxBytes=settings.MAX_FILE_SIZE, backupCount=1)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def logger_setup(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger_path = \"/tmp/\" + logger.name\n logger_format = '%(asctime)s %(name)s %(levelname)s %(lineno)d %(message)s'\n\n # set up logging to file\n logging.basicConfig(\n level=logging.INFO,\n format=logger_format,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logger_path,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which for console use\n formatter = logging.Formatter(logger_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n return logger", "def main():\n logger = 
ConsoleLogger([LogLevel.ALL])\n email_logger = logger.set_next(\n EmailLogger([LogLevel.FUNCTIONAL_MESSAGE, LogLevel.FUNCTIONAL_ERROR])\n )\n # As we don't need to use file logger instance anywhere later\n # We will not set any value for it.\n email_logger.set_next(\n FileLogger([LogLevel.WARNING, LogLevel.ERROR])\n )\n\n # ConsoleLogger will handle this part of code since the message\n # has a log level of all\n logger.message(\"Entering function ProcessOrder().\", LogLevel.DEBUG)\n logger.message(\"Order record retrieved.\", LogLevel.INFO)\n\n # ConsoleLogger and FileLogger will handle this part since file logger\n # implements WARNING and ERROR\n logger.message(\n \"Customer Address details missing in Branch DataBase.\",\n LogLevel.WARNING\n )\n logger.message(\n \"Customer Address details missing in Organization DataBase.\",\n LogLevel.ERROR\n )\n\n # ConsoleLogger and EmailLogger will handle this part as they implement\n # functional error\n logger.message(\n \"Unable to Process Order ORD1 Dated D1 for customer C1.\",\n LogLevel.FUNCTIONAL_ERROR\n )\n logger.message(\"OrderDispatched.\", LogLevel.FUNCTIONAL_MESSAGE)", "def get_logger(name='some script'):\n\n #timestamp for filename \n timestamp = datetime.now().strftime('%Y-%m-%d')\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n #custom formatter\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(filename)s '\n '%(funcName)s line: %(lineno)s: %(msg)s'\n )\n handler = logging.FileHandler('/tmp/scripts_{0}.log'.format(timestamp))\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n #print to stdout if it's interactive, but file-only if not\n if sys.stdin.isatty():\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger", "def getLogger(name):\n return logging.getLogger(\".\".join([\"mpm\"] + name.split(\".\")[1:]))", "def logging_manager(*, debug: bool = False) -> Iterator[None]:\n formatter = Formatter(fmt=\"%(levelname)s: %(message)s\", datefmt=\"\")\n root_logger = logging.getLogger(\"conda-pytorch\")\n root_logger.setLevel(logging.DEBUG)\n\n console_handler = logging.StreamHandler()\n if debug:\n console_handler.setLevel(logging.DEBUG)\n else:\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n root_logger.addHandler(console_handler)\n\n log_file = os.path.join(logging_run_dir(), \"nightly.log\")\n\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(formatter)\n root_logger.addHandler(file_handler)\n logging_record_argv()\n\n try:\n logging_rotate()\n print(f\"log file: {log_file}\")\n yield root_logger\n except Exception as e:\n logging.exception(\"Fatal exception\")\n logging_record_exception(e)\n print(f\"log file: {log_file}\")\n sys.exit(1)\n except BaseException as e:\n # You could logging.debug here to suppress the backtrace\n # entirely, but there is no reason to hide it from technically\n # savvy users.\n logging.info(\"\", exc_info=True)\n logging_record_exception(e)\n print(f\"log file: {log_file}\")\n sys.exit(1)", "def settings_context(content, directory=None, name=\"LOGGING_CONFIG\"):\n initial_logging_config = os.environ.get(\"AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS\", \"\")\n try:\n settings_root = tempfile.mkdtemp()\n filename = f\"{SETTINGS_DEFAULT_NAME}.py\"\n if directory:\n # Create the directory structure with __init__.py\n dir_path = os.path.join(settings_root, directory)\n pathlib.Path(dir_path).mkdir(parents=True, exist_ok=True)\n\n 
basedir = settings_root\n for part in directory.split(\"/\"):\n open(os.path.join(basedir, \"__init__.py\"), \"w\").close()\n basedir = os.path.join(basedir, part)\n open(os.path.join(basedir, \"__init__.py\"), \"w\").close()\n\n # Replace slashes by dots\n module = directory.replace(\"/\", \".\") + \".\" + SETTINGS_DEFAULT_NAME + \".\" + name\n settings_file = os.path.join(dir_path, filename)\n else:\n module = SETTINGS_DEFAULT_NAME + \".\" + name\n settings_file = os.path.join(settings_root, filename)\n\n with open(settings_file, \"w\") as handle:\n handle.writelines(content)\n sys.path.append(settings_root)\n\n # Using environment vars instead of conf_vars so value is accessible\n # to parent and child processes when using 'spawn' for multiprocessing.\n os.environ[\"AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS\"] = module\n yield settings_file\n\n finally:\n os.environ[\"AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS\"] = initial_logging_config\n sys.path.remove(settings_root)", "def _set_up_logging(tag, name):\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))", "def _set_up_logging(tag, name):\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))", "def _set_up_logging(tag, name):\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))", "def getLogger(name):\n return logging.getLogger(name)", "def create_logger(job_name, log_file=None, debug=True):\n logging.basicConfig(level=5,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M')\n logging.root.handlers = []\n if debug:\n chosen_level = 5\n else:\n chosen_level = logging.INFO\n logger = logging.getLogger(job_name)\n formatter = logging.Formatter(fmt='%(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S')\n if log_file is not None:\n log_dir = osp.dirname(log_file)\n if log_dir:\n if not osp.exists(log_dir):\n os.makedirs(log_dir)\n # cerate file handler\n fh = logging.FileHandler(log_file)\n fh.setLevel(chosen_level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # Colored stream handler\n sh = ColorStreamHandler()\n sh.setLevel(chosen_level)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n return logger", "def setup_logger(\n output=None, distributed_rank=0, *, color=True, name=\"chefCap\", abbrev_name=None, log_level=logging.DEBUG\n):\n logger = logging.getLogger(name)\n logger.setLevel(log_level)\n logger.propagate = False\n\n if abbrev_name is None:\n abbrev_name = \"d2\" if name == \"detectron2\" else name\n\n plain_formatter = logging.Formatter(\n \"[%(asctime)s] %(threadName)-9s %(name)s %(levelname)s: %(message)s\", datefmt=\"%m/%d %H:%M:%S\"\n )\n # stdout logging: master only\n if distributed_rank == 0:\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.DEBUG)\n if color:\n formatter = _ColorfulFormatter(\n colored(\"[%(asctime)s %(threadName)-9s %(name)s]: \", \"green\") + \"%(message)s\",\n datefmt=\"%m/%d %H:%M:%S\",\n root_name=name,\n abbrev_name=str(abbrev_name),\n )\n else:\n formatter = plain_formatter\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # file logging: all workers\n if output is not None:\n if output.endswith(\".txt\") or output.endswith(\".log\"):\n filename = output\n else:\n filename = 
os.path.join(output, \"log.txt\")\n if distributed_rank > 0:\n filename = filename + \".rank{}\".format(distributed_rank)\n PathManager.mkdirs(os.path.dirname(filename))\n\n fh = logging.StreamHandler(_cached_log_stream(filename))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(plain_formatter)\n logger.addHandler(fh)\n\n return logger", "def get_logger(name, level='INFO', terminal_log=True, file_log=False,\n file_name=None, file_max_bytes=1048576, file_backup_count=3,\n email_on_warnings=True, email_on_errors=True):\n # Get the root logger and set the level\n log_level = getattr(logging, level.upper())\n root_logger = logging.getLogger('')\n root_logger.setLevel(log_level)\n\n handlers = []\n # Form the handler(s) and set the level\n if terminal_log:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(log_level)\n handlers.append(stream_handler)\n\n # Create email warning handler\n if email_on_warnings:\n # Note, the placeholder in the subject will be replaced by the hostname\n warning_email_handler = CustomSMTPWarningHandler(\n mailhost=MAIL_HOST, fromaddr=WARNING_EMAIL,\n toaddrs=[WARNING_EMAIL], subject='Warning from: {}')\n warning_email_handler.setLevel(logging.WARNING)\n handlers.append(warning_email_handler)\n\n # Create email error handler\n if email_on_errors:\n # Note, the placeholder in the subject will be replaced by the hostname\n error_email_handler = CustomSMTPHandler(\n mailhost=MAIL_HOST, fromaddr=ERROR_EMAIL,\n toaddrs=[ERROR_EMAIL], subject='Error from: {}')\n error_email_handler.setLevel(logging.ERROR)\n handlers.append(error_email_handler)\n\n # Create rotating file handler\n if file_log:\n if file_name is None:\n file_name = name + '.log'\n file_handler = RotatingFileHandler(file_name, maxBytes=file_max_bytes,\n backupCount=file_backup_count)\n file_handler.setLevel(log_level)\n handlers.append(file_handler)\n\n # Add formatters to the handlers and add the handlers to the root_logger\n formatter = logging.Formatter(\n '%(asctime)s:%(name)s: %(levelname)s: %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Create a named logger and return it\n logger = logging.getLogger(name)\n return logger", "def setup_logger(name='', level=twiggy.levels.DEBUG,\n fmt=twiggy.formats.line_format,\n fmt_name=('{0:%s}' % 10).format,\n screen=None, file_name=None,\n mpi_comm=None, zmq_addr=None,\n log_exceptions=True, multiline=False):\n\n fmt = copy.copy(fmt)\n fmt.conversion.delete('name')\n\n # Apply name format to the value (i.e., the name), not the key (i.e., the\n # field name \"name\"):\n fmt.conversion.add('name', str, lambda k, v: fmt_name(v))\n\n if file_name:\n if mpi_comm:\n if 'mpi4py.MPI' not in sys.modules:\n raise ValueError('mpi4py not available')\n if not isinstance(mpi_comm, mpi4py.MPI.Intracomm):\n raise ValueError('mpi_comm must be an instance of '\n 'mpi4py.MPI.Intracomm')\n if 'neurokernel.tools.mpi' not in sys.modules:\n raise ValueError('neurokernel.tools.mpi not available')\n file_output = \\\n neurokernel.tools.mpi.MPIOutput(file_name, fmt, mpi_comm)\n else:\n file_output = \\\n twiggy.outputs.FileOutput(file_name, fmt, 'w')\n twiggy.add_emitters(('file', level, None, file_output))\n\n if screen:\n screen_output = \\\n twiggy.outputs.StreamOutput(fmt, stream=sys.stdout)\n twiggy.add_emitters(('screen', level, None, screen_output))\n\n if zmq_addr:\n if 'neurokernel.tools.zmq' not in sys.modules:\n raise ValueError('neurokernel.tools.zmq not available')\n zmq_output = 
neurokernel.tools.zmq.ZMQOutput(zmq_addr, fmt)\n twiggy.add_emitters(('zmq', level, None, zmq_output))\n\n logger = twiggy.log.name(fmt_name(name))\n if log_exceptions:\n set_excepthook(logger, multiline)\n\n return logger", "def setup_and_get_worker_interceptor_logger(is_for_stdout: bool = True):\n file_extension = \"out\" if is_for_stdout else \"err\"\n logger = logging.getLogger(f\"ray_default_worker_{file_extension}\")\n logger.setLevel(logging.INFO)\n # TODO(sang): This is how the job id is propagated to workers now.\n # But eventually, it will be clearer to just pass the job id.\n job_id = os.environ.get(\"RAY_JOB_ID\")\n if args.worker_type == \"WORKER\":\n assert job_id is not None, (\n \"RAY_JOB_ID should be set as an env \"\n \"variable within default_worker.py. If you see this error, \"\n \"please report it to Ray's Github issue.\")\n worker_name = \"worker\"\n else:\n job_id = ray.JobID.nil()\n worker_name = \"io_worker\"\n\n # Make sure these values are set already.\n assert ray.worker._global_node is not None\n assert ray.worker.global_worker is not None\n handler = RotatingFileHandler(\n f\"{ray.worker._global_node.get_session_dir_path()}/logs/\"\n f\"{worker_name}-\"\n f\"{ray.utils.binary_to_hex(ray.worker.global_worker.worker_id)}-\"\n f\"{job_id}-{os.getpid()}.{file_extension}\")\n logger.addHandler(handler)\n # TODO(sang): Add 0 or 1 to decide whether\n # or not logs are streamed to drivers.\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n # Avoid messages are propagated to parent loggers.\n logger.propagate = False\n # Remove the terminator. It is important because we don't want this\n # logger to add a newline at the end of string.\n handler.terminator = \"\"\n return logger" ]
[ "0.56715065", "0.5547985", "0.55348605", "0.5509056", "0.55090016", "0.5430356", "0.54287386", "0.53950167", "0.538413", "0.5351113", "0.52963126", "0.5291368", "0.52313596", "0.5222739", "0.51738536", "0.51407766", "0.5124997", "0.51218605", "0.50439346", "0.502796", "0.50261927", "0.5012156", "0.5012156", "0.5012156", "0.4998264", "0.49962237", "0.4994511", "0.49895975", "0.49836412", "0.49822578" ]
0.7915124
0
Imports a Python file as a module named luh3417.{name}
def import_file(name: Text, file_path: Text):
    spec = spec_from_file_location(f"luh3417.{name}", file_path)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_module(self, location, name):", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "def _import_string(module_name, content):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' + module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module = imp.new_module(safe_name)\n exec content in module.__dict__\n return module", "def my_import(name):\n components = name.split('.')\n mod = __import__(components[0], globals(), locals(), components[1:], -1)\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod", "def import_module(name):\n __import__(name)\n return sys.modules[name]", "def import_from_file(module_name: str, filepath: str):\n return SourceFileLoader(module_name, filepath).load_module()", "def import_(filename):\n (path, name) = os.path.split(filename)\n (name, ext) = os.path.splitext(name)\n try:\n return sys.modules[name]\n except KeyError:\n pass\n try:\n file, filename, data = imp.find_module(name, [path])\n except ImportError:\n print('No module {} found'.format(name))\n try:\n mod = imp.load_module(name, file, filename, data)\n return mod\n except UnboundLocalError:\n pass\n finally:\n # Since we may exit via an exception, close fp explicitly.\n try:\n if file:\n file.close()\n except UnboundLocalError:\n if not os.path.exists(path):\n os.makedirs(path)\n from shutil import copyfile\n if os.name == 'nt':\n copyfile(os.path.join(path_to_module, 'models\\myfitmodels.py'), filename)\n else:\n copyfile(os.path.join(path_to_module, './models/myfitmodels.py'), filename)\n # open(filename, 'a').close()", "def importModule(filename):\n\tfrom os.path import abspath, split, splitext\n\tfrom sys import path\n\tif isPython2():\n\t\tfrom imp import reload\n\telse:\n\t\tfrom importlib import reload\n\t\n\tfilename = adaptPath(filename)\n\tmodulePath = abspath(split(filename)[0])\n\tmoduleName = splitext(split(filename)[1])[0]\n\t\n\tif not modulePath in path:\n\t\tpath.append (modulePath)\n\tmodule = __import__(moduleName)\n\treload (module)\n\treturn module", "def load_module(file_name):\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m", "def import_main(name):\n config.MAIN_MODULE_NAME = name\n return importlib.import_module(name)", "def _importer(name, root_package=False, relative_globals=None, level=0):\n return __import__(name, locals=None, # locals has no use\n globals=relative_globals,\n fromlist=[] if root_package else [None],\n level=level)", "def as_module(file_path, name):\n\n with lock:\n with open(file_path, 'U') as module_file:\n prev = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n module = imp.load_module(name, module_file, file_path, (\".py\", 'U', imp.PY_SOURCE))\n sys.dont_write_bytecode = prev\n sys.modules[name] = module\n return module", "def import_from(module: str, name: str):\n\n module = __import__(module, fromlist=[name])\n return getattr(module, name)", "def import_module(name, path):\n spec = 
importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module", "def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module", "def _import(module_name, dir_name):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' + module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module_info = imp.find_module(module_name, [dir_name])\n module = imp.load_module(safe_name, *module_info)\n return module", "def ppimport(name):\n global _ppimport_is_enabled\n\n level = 1\n parent_frame = p_frame = _get_frame(level)\n while not p_frame.f_locals.has_key('__name__'):\n level = level + 1\n p_frame = _get_frame(level)\n\n p_name = p_frame.f_locals['__name__']\n if p_name=='__main__':\n p_dir = ''\n fullname = name\n elif p_frame.f_locals.has_key('__path__'):\n # python package\n p_path = p_frame.f_locals['__path__']\n p_dir = p_path[0]\n fullname = p_name + '.' + name\n else:\n # python module\n p_file = p_frame.f_locals['__file__']\n p_dir = os.path.dirname(p_file)\n fullname = p_name + '.' 
+ name\n\n # module may be imported already\n module = sys.modules.get(fullname)\n if module is not None:\n if _ppimport_is_enabled or isinstance(module, types.ModuleType):\n return module\n return module._ppimport_importer()\n\n so_ext = _get_so_ext()\n py_exts = ('.py','.pyc','.pyo')\n so_exts = (so_ext,'module'+so_ext)\n\n for d,n,fn,e in [\\\n # name is local python module or local extension module\n (p_dir, name, fullname, py_exts+so_exts),\n # name is local package\n (os.path.join(p_dir, name), '__init__', fullname, py_exts),\n # name is package in parent directory (scipy specific)\n (os.path.join(os.path.dirname(p_dir), name), '__init__', name, py_exts),\n ]:\n location = _is_local_module(d, n, e)\n if location is not None:\n fullname = fn\n break\n\n if location is None:\n # name is to be looked in python sys.path.\n fullname = name\n location = 'sys.path'\n\n # Try once more if module is imported.\n # This covers the case when importing from python module\n module = sys.modules.get(fullname)\n\n if module is not None:\n if _ppimport_is_enabled or isinstance(module,types.ModuleType):\n return module\n return module._ppimport_importer()\n # It is OK if name does not exists. The ImportError is\n # postponed until trying to use the module.\n\n loader = _ModuleLoader(fullname,location,p_frame=parent_frame)\n if _ppimport_is_enabled:\n return loader\n\n return loader._ppimport_importer()", "def load_module(name, path):\n loader = importlib.machinery.SourceFileLoader(name, path)\n module = types.ModuleType(loader.name)\n loader.exec_module(module)\n return module", "def load_mod_from_file(self, fpath):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tfpath = os.path.abspath(fpath)\n\t\tfile_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]\n\t\tif file_ext.lower() != '.py':\n\t\t\treturn\n\t\twith open(fpath) as f:\n\t\t\tcontent = f.read().splitlines()\n\t\tok = False\n\t\tfor line in content:\n\t\t\tif line.strip() == 'from shutit_module import ShutItModule':\n\t\t\t\tok = True\n\t\t\t\tbreak\n\t\tif not ok:\n\t\t\tself.log('Rejected file: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules.\n\t\t# TODO: this is quadratic complexity\n\t\texistingmodules = [\n\t\t\tm for m in self.shutit_modules\n\t\t\tif getattr(m, '__module_file', None) == fpath\n\t\t]\n\t\tif existingmodules:\n\t\t\tself.log('Module already seen: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Looks like it's ok to load this file\n\t\tself.log('Loading source for: ' + fpath,level=logging.DEBUG)\n\n\t\t# Add this directory to the python path iff not already there.\n\t\tdirectory = os.path.dirname(fpath)\n\t\tif directory not in sys.path:\n\t\t\tsys.path.append(os.path.dirname(fpath))\n\t\t# TODO: use bytearray to encode?\n\t\tmod_name = base64.b32encode(fpath.encode()).decode().replace('=', '')\n\t\tpymod = imp.load_source(mod_name, fpath)\n\n\t\t# Got the python module, now time to pull the shutit module(s) out of it.\n\t\ttargets = [\n\t\t\t('module', self.shutit_modules), ('conn_module', self.conn_modules)\n\t\t]\n\t\tself.build['source'] = {}\n\t\tfor attr, target in targets:\n\t\t\tmodulefunc = getattr(pymod, attr, None)\n\t\t\t# Old style or not a shutit module, nothing else to do\n\t\t\tif not callable(modulefunc):\n\t\t\t\treturn\n\t\t\tmodules = modulefunc()\n\t\t\tif not isinstance(modules, list):\n\t\t\t\tmodules = 
[modules]\n\t\t\tfor module in modules:\n\t\t\t\tsetattr(module, '__module_file', fpath)\n\t\t\t\tShutItModule.register(module.__class__)\n\t\t\t\ttarget.add(module)\n\t\t\t\tself.build['source'][fpath] = open(fpath).read()", "def LoadModule(filename):\n (name, ext) = os.path.splitext(filename)\n\n fh = open(filename, \"r\")\n try:\n return imp.load_module(name, fh, filename, (ext, \"r\", imp.PY_SOURCE))\n finally:\n fh.close()", "def load_module(file_name):\n mod_name = file_module_name(file_name)\n spec = imputil.spec_from_file_location(mod_name, file_name)\n if spec is None:\n raise ImportError(f'cannot import from {file_name!r}')\n mod = imputil.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod", "def importer(name) -> ContextType:\n try:\n # try importing as a module (using importlib from standard import mechanism)\n return __import__(name, globals=globals(), locals=locals())\n except:\n route_steps = name.split(\".\")\n route_steps = route_steps[1:] if not route_steps[0] else route_steps\n is_name_module, is_name_package = is_module(name), is_package(name)\n assert is_name_module or is_name_package\n file_path = os.path.join(*route_steps)\n if is_name_module:\n file_path = f\"{file_path}.py\"\n else: # name is definitely a package (because of the assertion)\n file_path = os.path.join(file_path, \"__init__.py\")\n spec = importlib.util.spec_from_file_location(name, file_path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return foo", "def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n raise Exception(\"Couldn't find google drive folder!\")\n\n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib", "def dynamicallyLoadModule(name):\n f, file, desc=imp.find_module(name, [ROLES_DIR])\n return imp.load_module(ROLES_PKG_NAME+'.'+name, f, file, desc)", "def import_python_module_by_filename(name, module_filename):\n\n sys.path.append(abspath(dirname(module_filename)))\n spec = importlib.util.spec_from_file_location(\n name,\n location=module_filename)\n imported_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(imported_module)\n return imported_module" ]
[ "0.7269499", "0.652222", "0.6388376", "0.6388376", "0.6388376", "0.6348975", "0.6348975", "0.6311361", "0.62959266", "0.62948275", "0.6222974", "0.61776567", "0.6171849", "0.6134908", "0.6124925", "0.6088499", "0.60543156", "0.60247666", "0.59827363", "0.5958609", "0.5942658", "0.5929849", "0.58722687", "0.5860484", "0.5849049", "0.58253074", "0.5818625", "0.5811186", "0.57969713", "0.57756966" ]
0.7448031
0
Fetch the URL from the Neutron service for a particular endpoint type. If none given, return publicURL.
def url_for(self, attr=None, filter_value=None, service_type='network', endpoint_type='publicURL'):
    catalog = self.catalog['access'].get('serviceCatalog', [])
    matching_endpoints = []
    for service in catalog:
        if service['type'] != service_type:
            continue
        endpoints = service['endpoints']
        for endpoint in endpoints:
            if not filter_value or endpoint.get(attr) == filter_value:
                matching_endpoints.append(endpoint)
    if not matching_endpoints:
        raise exceptions.EndpointNotFound()
    elif len(matching_endpoints) > 1:
        raise exceptions.AmbiguousEndpoints(
            matching_endpoints=matching_endpoints)
    else:
        if endpoint_type not in matching_endpoints[0]:
            raise exceptions.EndpointTypeNotFound(type_=endpoint_type)
        return matching_endpoints[0][endpoint_type]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_endpoint(self, datacenter=None, network=None):\r\n if datacenter is None:\r\n datacenter = 'dal05'\r\n if network is None:\r\n network = 'public'\r\n try:\r\n host = ENDPOINTS[datacenter][network]\r\n return \"https://%s\" % host\r\n except KeyError:\r\n raise TypeError('Invalid endpoint %s/%s'\r\n % (datacenter, network))", "def endpoint_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_url\")", "def _get_endpoint(ks_session, **kwargs):\n # set service specific endpoint types\n endpoint_type = kwargs.get('endpoint_type') or 'publicURL'\n service_type = kwargs.get('service_type') or 'monitoring'\n\n endpoint = ks_session.get_endpoint(service_type=service_type,\n interface=endpoint_type,\n region_name=kwargs.get('region_name'))\n\n return endpoint", "def endpoint_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_uri\")", "def get_neutron_endpoint(cls, json_resp):\n catalog = json_resp.get('token', {}).get('catalog', [])\n match = 'neutron'\n\n neutron_endpoint = None\n for entry in catalog:\n if entry['name'] == match or 'Networking' in entry['name']:\n valid_endpoints = {}\n for ep in entry['endpoints']:\n interface = ep.get('interface', '')\n if interface in ['public', 'internal']:\n valid_endpoints[interface] = ep['url']\n\n if valid_endpoints:\n # Favor public endpoints over internal\n neutron_endpoint = valid_endpoints.get(\"public\", valid_endpoints.get(\"internal\"))\n break\n else:\n raise MissingNeutronEndpoint()\n\n return neutron_endpoint", "def endpoint_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_uri\")", "def endpoint_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_uri\")", "def get_endpoint_url(endpoint):\n return urljoin(api_url_base(), endpoint)", "def _lookup_url(self, endpoint, values):\r\n try:\r\n cont = self.get_container(values['container'])\r\n if cont.cdn_enabled:\r\n return \"%s/%s\" % (cont.cdn_uri, values['filename'])\r\n else:\r\n return None\r\n except: # pragma: no cover\r\n return None", "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "def build_url(self, endpoint_url: str) -> str:\n return self.base_url + endpoint_url % self.instance_id", "def url(self):\n _, body = self.request('/v1.1/url', 'GET')\n return body.get('url', None)", "def build_url(self, endpoint):\n if hasattr(self, \"port\"):\n return \"{}://{}:{}/{}\".format(\n self.scheme, self.root_url, self.port, endpoint)\n else:\n return \"{}://{}/{}\".format(\n self.scheme, self.root_url, endpoint)", "def _get_endpoint() -> Optional[str]:\n if request.endpoint is None:\n return None\n return request.endpoint.split(\".\")[-1]", "def get_one(self, endpoint_ident):\n context = pecan.request.context\n endpoint = api_utils.get_resource('Endpoint', endpoint_ident)\n return Endpoint.convert_with_links(endpoint)", "def _get_api_endpoint():\n try:\n return get_service_endpoint(\"apiext\").strip(\"/\")\n except:\n log.warn(\n \"Could not find valid apiext endpoint for links so will use policy engine endpoint instead\"\n )\n try:\n return get_service_endpoint(\"policy_engine\").strip(\"/\")\n except:\n log.warn(\n \"No policy engine endpoint found either, using default but invalid url\"\n )\n return \"http://<valid endpoint not found>\"", "def get_service_url():\n return get_config_handler().get_service_url()", "def get_endpoint(self, endpoint):\n for item in self.endpoints:\n if endpoint == item[0]:\n return item\n return None", "def get_endpoint(self, 
session, **kwargs):\n endpoint_data = self.get_endpoint_data(\n session, discover_versions=False, **kwargs)\n if not endpoint_data:\n return None\n return endpoint_data.url", "def __get_endpoint(self):\n return self._endpoint", "def endpoint_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_type\")", "def get_website_url(self):\n if self.website:\n return self.website.get_absolute_url()\n elif self.external_url:\n return self.external_url.url\n elif self.parent:\n # try\n return self.parent.get_website_url()\n else: # except\n return default_entity.get_website", "def url(self) -> str:\n return self.HTTP.url if self.HTTP else self._url", "def api_url(self, endpoint):\n\n return '{}/{}'.format(self.api_root, endpoint)", "def endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint\")", "def endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint\")", "def private_link_endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_link_endpoint\")", "def private_link_endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_link_endpoint\")", "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")", "def url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"url\")" ]
[ "0.66849047", "0.6626688", "0.625258", "0.62082934", "0.61906004", "0.6171439", "0.6171439", "0.6152799", "0.60892427", "0.604996", "0.60300756", "0.5963532", "0.5956159", "0.5900996", "0.58805144", "0.5862944", "0.58121765", "0.5770395", "0.57648194", "0.5754705", "0.5751773", "0.5745028", "0.5718786", "0.5712504", "0.5710222", "0.5710222", "0.5709201", "0.5709201", "0.57047904", "0.57047904" ]
0.6814899
0
Set the client's service catalog from the response data.
def _extract_service_catalog(self, body):
    self.service_catalog = ServiceCatalog(body)
    try:
        sc = self.service_catalog.get_token()
        self.auth_token = sc['id']
        self.auth_tenant_id = sc.get('tenant_id')
        self.auth_user_id = sc.get('user_id')
    except KeyError:
        raise exceptions.Unauthorized()
    if not self.endpoint_url:
        self.endpoint_url = self.service_catalog.url_for(
            attr='region', filter_value=self.region_name,
            service_type=self.service_type,
            endpoint_type=self.endpoint_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_srv_response(self, srvs):\n with self._context.lock:\n self._context.data[\"services\"] = srvs", "def _set_catalog(self, catalog: cat.Catalog) -> None:\n self._catalog_interface = CatalogInterface(catalog)\n self._catalog = catalog", "def deserialize_catalog(catalog, client):\n try:\n data = json.loads(catalog)\n except Exception:\n data = catalog\n for item in data:\n client.data.add_by_path(json.loads(item))\n return client", "def __init__(self, conn, iTag, srvType):\r\n self._responses = {}\r\n\r\n super(_ServiceClient, self).__init__(conn, iTag, srvType)", "def _extract_catalog(self, data):\n interface = 'public'\n catalog = data['token']['catalog']\n service_map = {}\n for service in catalog:\n service_endpoint = None\n for endpoint in service['endpoints']:\n if endpoint['interface'] == interface:\n service_endpoint = endpoint['url']\n break\n if service_endpoint:\n service_map[service['type']] = service_endpoint\n LOG.debug('Service catalog: %s' % service_map)\n return service_map", "def get_catalog_v0(self, catalog_id, **kwargs):\n # type: (str, **Any) -> Union[ApiResponse, object, BadRequestError_a8ac8b44, CatalogDetails_912693fa, Error_d660d58]\n operation_name = \"get_catalog_v0\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'catalog_id' is set\n if ('catalog_id' not in params) or (params['catalog_id'] is None):\n raise ValueError(\n \"Missing the required parameter `catalog_id` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v0/catalogs/{catalogId}'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'catalog_id' in params:\n path_params['catalogId'] = params['catalog_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.catalog.catalog_details.CatalogDetails\", status_code=200, message=\"Successful operation.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=429, message=\"Exceed the permitted request limit. 
Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v0.error.Error\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"GET\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=\"ask_smapi_model.v0.catalog.catalog_details.CatalogDetails\")\n\n if full_response:\n return api_response\n return api_response.body", "def fake_catalog(tenant, token):\n catalog_gen = servicecatalog.ServiceCatalogGenerator(token, tenant)\n catalog = catalog_gen.generate_full_catalog()['access']\n return access.AccessInfoV2(**catalog)", "def catalog_id(self, catalog_id):\n self._catalog_id = catalog_id", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )", "def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def initCatalog():\n catalog = model.newCatalog()\n return catalog", "def getCatalogs():", "def _set_catalog_view(self, session):\n if self._catalog_view == FEDERATED:\n try:\n session.use_federated_catalog_view()\n except AttributeError:\n pass\n else:\n try:\n session.use_isolated_catalog_view()\n except AttributeError:\n pass", "def __init__(self):\n super(CatalogProxy, self).new_instance(\"catalog\", Catalog)", "def initCatalogA():\n return controller.initCatalogA()", "def initCatalogA():\n return controller.initCatalogA()", "def set_catalogue(self, catalogue, force_it=False,\n fast_setup=False):\n from .catalogue.basecatalogue import Catalogue\n\n if not fast_setup:\n if self.has_catalogue() and force_it is False:\n raise AttributeError(\"'catalogue' already defined\"+\\\n \" Set force_it to True if you really known what you are doing\")\n \n if Catalogue not in catalogue.__class__.__mro__:\n raise TypeError(\"the input 'catalogue' must be an astrobject Catalogue\")\n \n if hasattr(self,\"wcs\") and self.has_wcs():\n catalogue.set_wcs(self.wcs, force_it=True)\n if catalogue.nobjects_in_fov < 1:\n warnings.warn(\"WARNING No object in the field of view,\"+\"\\n\"+\\\n \" -> catalogue not loaded\")\n return\n \n # --------\n # - set it\n self._side_properties[\"catalogue\"] = catalogue", "def initCatalog(tad_list_type):\n catalog = model.newCatalog(tad_list_type)\n return catalog", "def __init__(self, catalog: cat.Catalog) -> None:\n self._catalog = catalog\n self._control_dict = self._create_control_dict()", "def initCatalog(list_type):\n catalog = model.newCatalog(list_type)\n return catalog", "def get_v3_catalog(self, user_id, tenant_id, metadata=None):\n v2_catalog = self.get_catalog(user_id, tenant_id, metadata=metadata)\n v3_catalog = []\n\n for region_name, region in 
six.iteritems(v2_catalog):\n for service_type, service in six.iteritems(region):\n service_v3 = {\n 'type': service_type,\n 'endpoints': []\n }\n\n for attr, value in six.iteritems(service):\n # Attributes that end in URL are interfaces. In the V2\n # catalog, these are internalURL, publicURL, and adminURL.\n # For example, <region_name>.publicURL=<URL> in the V2\n # catalog becomes the V3 interface for the service:\n # { 'interface': 'public', 'url': '<URL>', 'region':\n # 'region: '<region_name>' }\n if attr.endswith('URL'):\n v3_interface = attr[:-len('URL')]\n service_v3['endpoints'].append({\n 'interface': v3_interface,\n 'region': region_name,\n 'url': value,\n })\n continue\n\n # Other attributes are copied to the service.\n service_v3[attr] = value\n\n v3_catalog.append(service_v3)\n\n return v3_catalog", "def update_response(self, response):\r\n self.stri.update_response(response)", "def update_response(self, response):\r\n self.stri.update_response(response)" ]
[ "0.636587", "0.6025779", "0.5972524", "0.5868126", "0.56682014", "0.53990924", "0.53895503", "0.53510207", "0.5188658", "0.5188658", "0.5188658", "0.5188658", "0.5188658", "0.5160498", "0.51434344", "0.51307184", "0.51307184", "0.51307184", "0.51051885", "0.50766647", "0.5069019", "0.5034562", "0.5034562", "0.50111115", "0.50056326", "0.4996213", "0.499259", "0.4978532", "0.49603263", "0.49603263" ]
0.6341135
1
Returns the integer status code from the response. Either a Webob.Response (used in testing) or requests.Response is returned.
def get_status_code(self, response):
    if hasattr(response, 'status_int'):
        return response.status_int
    else:
        return response.status_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status_code(self, response):\n if hasattr(response, 'status_int'):\n return response.status_int\n return response.status", "def _get_status_code(response: Response) -> int:\n status_code = response.status_code\n if isinstance(status_code, HTTPStatus):\n return status_code.value\n else:\n return status_code", "def get_status_code(self):\n return self.__response.status_code", "def status_code(self) -> Optional[int]:\n if self.response is not None:\n return self.response.status_code\n return None", "def response_code(self):\r\n return self._response_code", "def response_code(self):\n return self._response_code", "def response_code(self):\n return self._response_code", "def get_ResponseStatusCode(self):\n return self._output.get('ResponseStatusCode', None)", "def get_response_status(response_code):\n if is_success(response_code):\n return 'success'\n return 'error'", "def getResponseCode(self) -> int:\n ...", "def response_status(self):\n return self.__response_status", "def code(self):\n\t\treturn self.status_code", "def status_code(self) -> int:\n return pulumi.get(self, \"status_code\")", "def status_code(self):\n return self._status_code", "def status_code(self):\n return int(self.status.split()[1])", "def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover", "def get_status_code(self, status_line):\n try:\n return int(status_line.split(' ')[1])\n except ValueError:\n return 400\n except IndexError:\n return 404", "def status_code(self):\n return int(self._status[:3])", "def status_code(self):\r\n return int(self._status[:3])", "def custom_block_response_status_code(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")", "def get_response_status_header(response: requests.Response) -> str:\n if hasattr(response, 'headers'):\n return response.headers.get(RESPONSE_STATUS_HEADER, '')\n return ''", "def custom_block_response_status_code(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")", "def custom_block_response_status_code(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"custom_block_response_status_code\")", "def get_status():\n return \"OK\" # defaults to a 200 HTML status return code", "def get_server_status_code(url):\n # http://stackoverflow.com/questions/1140661\n host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]\n try:\n conn = httplib.HTTPConnection(host)\n conn.request('HEAD', path)\n return conn.getresponse().status\n except StandardError:\n return None", "def http_response(status_code: int) -> Tuple[dict, int]:\n return ({'message': HTTP_STATUS_CODES.get(status_code, '')}, status_code)", "def get_status_code(host, path=\"/\"):\n try:\n conn = httplib.HTTPConnection(host)\n conn.request(\"HEAD\", path)\n return conn.getresponse().status\n except StandardError:\n return None", "def GetStatusCode(host, path=\"/\"):\n try:\n conn = HTTPConnection(host)\n conn.request(\"HEAD\", path)\n return conn.getresponse().status\n except Exception:\n return None", "def _postprocess_response(\n self, response: Union[BaseModel, Tuple[BaseModel, int]]\n ) -> Tuple[BaseModel, int, Optional[str]]:\n code = None\n\n if isinstance(response, tuple):\n response, code = response\n\n if self.is_raw_response(response):\n return response, code or 200, \"\"\n\n if type(response) not in self._responses.keys():\n raise UnexpectedResponseError(type(response))\n\n if code is None:\n if len(self._responses[type(response)]) > 1:\n raise 
InvalidResponseError({\"status_code\": [\"Missing status code\"]})\n code = next(iter(self._responses[type(response)].keys()))\n\n if code not in self._responses[type(response)].keys():\n raise UnexpectedResponseError(type(response), code)\n\n return response, code, self._responses[type(response)][code].mimetype", "def error_code(self):\n return self.json['response'].get('error_code')" ]
[ "0.8706974", "0.8433784", "0.81698084", "0.79378104", "0.7715872", "0.7612964", "0.7612964", "0.7554774", "0.7411476", "0.72669125", "0.7237965", "0.7231767", "0.72067887", "0.7142123", "0.71238506", "0.6979288", "0.6972776", "0.68875885", "0.6868603", "0.6837098", "0.6793999", "0.66185445", "0.66185445", "0.6612911", "0.6581566", "0.65000033", "0.64864624", "0.6480449", "0.6442402", "0.64203656" ]
0.86493224
1
Recursive method to convert data members to XML nodes.
def _to_xml_node(self, parent, metadata, nodename, data, used_prefixes):
    result = etree.SubElement(parent, nodename)
    if ":" in nodename:
        used_prefixes.append(nodename.split(":", 1)[0])
    #TODO(bcwaldon): accomplish this without a type-check
    if isinstance(data, list):
        if not data:
            result.set(
                constants.TYPE_ATTR, constants.TYPE_LIST)
            return result
        singular = metadata.get('plurals', {}).get(nodename, None)
        if singular is None:
            if nodename.endswith('s'):
                singular = nodename[:-1]
            else:
                singular = 'item'
        for item in data:
            self._to_xml_node(result, metadata, singular, item, used_prefixes)
    #TODO(bcwaldon): accomplish this without a type-check
    elif isinstance(data, dict):
        if not data:
            result.set(
                constants.TYPE_ATTR, constants.TYPE_DICT)
            return result
        attrs = metadata.get('attributes', {}).get(nodename, {})
        for k, v in sorted(data.items()):
            if k in attrs:
                result.set(k, str(v))
            else:
                self._to_xml_node(result, metadata, k, v, used_prefixes)
    elif data is None:
        result.set(constants.XSI_ATTR, 'true')
    else:
        if isinstance(data, bool):
            result.set(
                constants.TYPE_ATTR, constants.TYPE_BOOL)
        elif isinstance(data, int):
            result.set(
                constants.TYPE_ATTR, constants.TYPE_INT)
        elif isinstance(data, long):
            result.set(
                constants.TYPE_ATTR, constants.TYPE_LONG)
        elif isinstance(data, float):
            result.set(
                constants.TYPE_ATTR, constants.TYPE_FLOAT)
        LOG.debug(_("Data %(data)s type is %(type)s"),
                  {'data': data, 'type': type(data)})
        if isinstance(data, str):
            result.text = unicode(data, 'utf-8')
        else:
            result.text = unicode(data)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_to_xml(data, xml=None):\n\n for element in data:\n name = element[0]\n val = element[1]\n if len(element) > 2:\n converter = element[2]\n else:\n converter = None\n\n if val is not None:\n if converter is not None:\n text = _str(converter(_str(val)))\n else:\n text = _str(val)\n\n entry = ET.Element(name)\n entry.text = text\n if xml is not None:\n xml.append(entry)\n else:\n return entry\n return xml", "def to_etree(self, data, options=None, name=None, depth=0):\r\n if isinstance(data, (list, tuple)):\r\n element = Element(name or 'objects')\r\n if name:\r\n element = Element(name)\r\n element.set('type', 'list')\r\n else:\r\n element = Element('objects')\r\n for item in data:\r\n element.append(self.to_etree(item, options, depth=depth + 1))\r\n elif isinstance(data, dict):\r\n if depth == 0:\r\n element = Element(name or 'response')\r\n else:\r\n element = Element(name or 'object')\r\n element.set('type', 'hash')\r\n for (key, value) in data.iteritems():\r\n element.append(self.to_etree(value, options, name=key, depth=depth + 1))\r\n else:\r\n element = Element(name or 'value')\r\n simple_data = self.to_simple(data, options)\r\n data_type = get_type_string(simple_data)\r\n\r\n if data_type != 'string':\r\n element.set('type', get_type_string(simple_data))\r\n\r\n if data_type != 'null':\r\n if isinstance(simple_data, unicode):\r\n element.text = simple_data\r\n else:\r\n element.text = force_unicode(simple_data)\r\n\r\n return element", "def xml2obj(self, src):\n\n\t\tclass DataNode(object):\n\t\t\tdef __init__(self):\n\t\t\t\tself._attrs = {} # XML attributes and child elements\n\t\t\t\tself.data = None # child text data\n\n\t\t\tdef __len__(self):\n\t\t\t\t# treat single element as a list of 1\n\t\t\t\treturn 1\n\n\t\t\tdef __getitem__(self, key):\n\t\t\t\tif isinstance(key, basestring):\n\t\t\t\t\treturn self._attrs.get(key,None)\n\t\t\t\telse:\n\t\t\t\t\treturn [self][key]\n\n\t\t\tdef __contains__(self, name):\n\t\t\t\treturn self._attrs.has_key(name)\n\n\t\t\tdef __nonzero__(self):\n\t\t\t\treturn bool(self._attrs or self.data)\n\n\t\t\tdef __getattr__(self, name):\n\t\t\t\tif name.startswith('__'):\n\t\t\t\t\t# need to do this for Python special methods???\n\t\t\t\t\traise AttributeError(name)\n\t\t\t\treturn self._attrs.get(name,None)\n\n\t\t\tdef _add_xml_attr(self, name, value):\n\t\t\t\tif name in self._attrs:\n\t\t\t\t\t\t# multiple attribute of the same name are represented by a list\n\t\t\t\t\t\tchildren = self._attrs[name]\n\t\t\t\t\t\tif not isinstance(children, list):\n\t\t\t\t\t\t\tchildren = [children]\n\t\t\t\t\t\t\tself._attrs[name] = children\n\t\t\t\t\t\tchildren.append(value)\n\t\t\t\telse:\n\t\t\t\t\tself._attrs[name] = value\n\n\t\t\tdef __str__(self):\n\t\t\t\treturn self.data or ''\n\n\t\t\tdef __repr__(self):\n\t\t\t\titems = sorted(self._attrs.items())\n\t\t\t\tif self.data:\n\t\t\t\t\titems.append(('data', self.data))\n\t\t\t\treturn u'{%s}' % ', '.join([u'%s:%s' % (k,repr(v)) for k,v in items])\n\n\t\tclass TreeBuilder(xml.sax.handler.ContentHandler):\n\t\t\tdef __init__(self):\n\t\t\t\tself.stack = []\n\t\t\t\tself.root = DataNode()\n\t\t\t\tself.current = self.root\n\t\t\t\tself.text_parts = []\n\t\t\t\tself.publicObjects = {}\n\n\t\t\tdef startElement(self, name, attrs):\n\t\t\t\tself.stack.append((self.current, self.text_parts))\n\t\t\t\tself.current = DataNode()\n\t\t\t\tself.text_parts = []\n\t\t\t\t# xml attributes --> python attributes\n\t\t\t\tfor k, v in attrs.items():\n\t\t\t\t\t# Register PublicObject in lookup map\n\t\t\t\t\tif k 
== \"publicID\":\n\t\t\t\t\t\tself.publicObjects[v] = self.current\n\t\t\t\t\tself.current._add_xml_attr(k, v)\n\n\t\t\tdef endElement(self, name):\n\t\t\t\ttext = ''.join(self.text_parts).strip()\n\t\t\t\tif text:\n\t\t\t\t\tself.current.data = text\n\t\t\t\tif self.current._attrs:\n\t\t\t\t\tobj = self.current\n\t\t\t\telse:\n\t\t\t\t\t# a text only node is simply represented by the string\n\t\t\t\t\tobj = text or ''\n\t\t\t\t\t# try to store the object as float if possible\n\t\t\t\t\ttry: obj = float(obj)\n\t\t\t\t\texcept: pass\n\t\t\t\tself.current, self.text_parts = self.stack.pop()\n\t\t\t\tself.current._add_xml_attr(name, obj)\n\n\t\t\tdef characters(self, content):\n\t\t\t\tself.text_parts.append(content)\n\n\t\tbuilder = TreeBuilder()\n\t\tif isinstance(src,basestring):\n\t\t\txml.sax.parseString(src, builder)\n\t\telse:\n\t\t\txml.sax.parse(src, builder)\n\t\treturn builder", "def prepare_node_attrs(self):", "def to_xml(self) -> str:\n # default name and stuff setup\n element_root, xml_tree = super()._add_basics()\n element_root = element_root.find('elementProp')\n element_root = element_root.find('collectionProp')\n for element in list(element_root):\n try:\n if element.attrib['name'] == 'influxdbUrl':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.influx_db_url:\n elem.text = self.influx_db_url\n elif element.attrib['name'] == 'application':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.application:\n elem.text = self.application\n elif element.attrib['name'] == 'measurement':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.measurement:\n elem.text = self.application\n elif element.attrib['name'] == 'summaryOnly':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value':\n elem.text = str(self.summary_only).lower()\n elif element.attrib['name'] == 'samplersRegex':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.samplers_regexp:\n elem.text = self.samplers_regexp\n elif element.attrib['name'] == 'percentiles':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.percentiles:\n elem.text = self.percentiles\n elif element.attrib['name'] == 'testTitle':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.test_title:\n elem.text = self.test_title\n elif element.attrib['name'] == 'eventTags':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.event_tags:\n elem.text = self.event_tags\n except Exception:\n raise Exception(f'Unable to render xml from {type(self).__class__}')\n return tree_to_str(xml_tree, hashtree=True)", "def datatoxml(data):\n return dicttoxml.dicttoxml(data,\n custom_root='records',\n attr_type=False,\n item_func=record_to_xml)", "def build(self):\n\n children = filter( lambda n: n.nodeType == n.ELEMENT_NODE, self.dom.childNodes[0].childNodes)\n for node in children:\n try:\n s = self.declare(node)\n except DeclareError as de:\n # is it the document info block\n if de.nodeType != \"docInfo\":\n raise de\n # has it got children, this is the only empty info block we allow\n if not node.hasChildNodes():\n continue\n # does it contain embedded xml, i.e. 
HTML tags\n if node.getAttribute(\"type\") == \"xml\":\n self.bumfText = \"\".join(map(lambda n: n.toxml(), node.childNodes))\n else:\n self.bumfText = node.childNodes[0].data\n else:\n self.data.append(s)\n\n # finally fix the typedefs (a bit like aliasing really)\n self.fixupTypedefs()", "def _build_tree(self, root, obj):\n\n if obj is None:\n return\n\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n element = root.add_child(attr.name)\n self._build_tree(element, getattr(obj, attr_name))\n elif isinstance(attr, XmlAttributeProperty):\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def transform_all(self, node):\n # don't traverse, only handle field lists that are immediate children\n summary = []\n data = {}\n name, uid = _get_desc_data(node.parent)\n for child in node:\n if isinstance(child, remarks):\n remarks_string = transform_node(child)\n data['remarks'] = remarks_string\n elif isinstance(child, addnodes.desc):\n if child.get('desctype') == 'attribute':\n attribute_map = {} # Used for detecting duplicated attributes in intermediate data and merge them\n\n for item in child:\n if isinstance(item, desc_signature) and any(isinstance(n, addnodes.desc_annotation) for n in item):\n # capture attributes data and cache it\n data.setdefault('added_attribute', [])\n\n item_ids = item.get('ids', [''])\n\n if len(item_ids) == 0: # find a node with no 'ids' attribute\n curuid = item.get('module', '') + '.' + item.get('fullname', '')\n # generate its uid by module and fullname\n else:\n curuid = item_ids[0]\n\n if len(curuid) > 0:\n parent = curuid[:curuid.rfind('.')]\n name = item.children[0].astext()\n\n if curuid in attribute_map:\n if len(item_ids) == 0: # ensure the order of docstring attributes and real attributes is fixed\n attribute_map[curuid]['syntax']['content'] += (' ' + item.astext())\n # concat the description of duplicated nodes\n else:\n attribute_map[curuid]['syntax']['content'] = item.astext() + ' ' + attribute_map[curuid]['syntax']['content']\n else:\n if _is_desc_of_enum_class(node):\n addedData = {\n 'uid': curuid,\n 'id': name,\n 'parent': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': item.parent.get('desctype'),\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext(),\n 'return': {\n 'type': [parent]\n }\n }\n }\n else:\n addedData = {\n 'uid': curuid,\n 'class': parent,\n 'langs': ['python'],\n 'name': name,\n 'fullName': curuid,\n 'type': 'attribute',\n 'module': item.get('module'),\n 'syntax': {\n 'content': item.astext()\n }\n }\n\n attribute_map[curuid] = addedData\n else:\n raise Exception('ids of node: ' + repr(item) + ' is missing.')\n # no ids and no duplicate or uid can not be generated.\n if 'added_attribute' in data:\n data['added_attribute'].extend(attribute_map.values()) # Add attributes data to a temp list\n\n # Don't recurse into child nodes\n continue\n elif isinstance(child, nodes.field_list):\n (entries, types) = _hacked_transform(self.typemap, child)\n _data = get_data_structure(entries, types, child)\n data.update(_data)\n elif isinstance(child, addnodes.seealso):\n data['seealso'] = transform_node(child)\n elif isinstance(child, nodes.admonition) and 'Example' in child[0].astext():\n # Remove the admonition node\n child_copy = child.deepcopy()\n child_copy.pop(0)\n data['example'] = transform_node(child_copy)\n else:\n 
content = transform_node(child)\n\n # skip 'Bases' in summary\n if not content.startswith('Bases: '):\n summary.append(content)\n\n if \"desctype\" in node.parent and node.parent[\"desctype\"] == 'class':\n data.pop('exceptions', '') # Make sure class doesn't have 'exceptions' field.\n\n if summary:\n data['summary'] = '\\n'.join(summary)\n # Don't include empty data\n for key, val in data.copy().items():\n if not val:\n del data[key]\n data['type'] = PatchedDocFieldTransformer.type_mapping(node.parent[\"desctype\"]) if \"desctype\" in node.parent else 'unknown'\n self.directive.env.docfx_info_field_data[uid] = data\n super(PatchedDocFieldTransformer, self).transform_all(node)", "def structToXml(fmt, data, eParent, _depth=0):\n assert _depth < 10, \"Maximum depth exceeded\"\n if type(fmt) is str:\n #elem = ET.SubElement(eParent, 'value', {'type':fmt})\n eParent.text = str(data)\n elif type(fmt[0]) is tuple:\n for item in fmt:\n structToXml(item, data, eParent, _depth=_depth+1)\n else:\n if len(fmt) > 3:\n itemOffs, itemType, itemName, itemCount = fmt\n else:\n itemOffs, itemType, itemName = fmt\n itemCount = 1\n val = data[itemName]\n if itemName in ('gameBits0', 'gameBits1', 'gameBits2', 'gameBits3'): # HACK\n _bitTableToXml(itemName, val, eParent)\n elif itemCount > 1:\n for i in range(itemCount):\n elem = ET.SubElement(eParent, itemName, {'idx':str(i)})\n structToXml(itemType, val[i], elem, _depth=_depth+1)\n else:\n elem = ET.SubElement(eParent, itemName)\n structToXml(itemType, val, elem, _depth=_depth+1)", "def recursive_visit(self, node):\n node = self.generic_visit(node)\n\n # walk through the children: either iterate the node or look up the keys\n if hasattr(node, '_dict_keys'):\n for v in node._dict_keys:\n self.recursive_visit(getattr(node, v))\n\n if hasattr(node, '_list_keys'):\n for v in node._list_keys:\n self.recursive_visit(getattr(node, v))\n else:\n iter_target = None\n # need special handling of node.data or node_list in order to walk through all formatting node, e.g. 
endl\n if hasattr(node, 'node_list'): # use the unproxy list to get all formatting\n iter_target = node.node_list\n elif hasattr(node, 'data'):\n iter_target = node.data\n elif hasattr(node, '__iter__'):\n iter_target = node\n\n if iter_target:\n change_list = []\n for child in iter_target:\n new_node = self.recursive_visit(child)\n if new_node is not child:\n change_list.append((child, new_node))\n\n for original_child, new_child in change_list:\n i = original_child.index_on_parent\n iter_target.remove(original_child)\n iter_target.insert(i, new_child)\n\n return node", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def recursive_generation(t):\n d = {t.tag: {} if t.attrib else None}\n children = list(t)\n\n if children:\n dd = defaultdict(list)\n\n for dc in map(recursive_generation, children):\n for k, v in dc.iteritems():\n dd[k].append(v)\n\n d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}\n\n if t.attrib:\n d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())\n\n if t.text:\n text = t.text.strip()\n\n if children or t.attrib:\n if text:\n d[t.tag]['#text'] = text\n else:\n d[t.tag] = text\n\n return d", "def _setup(self) -> None:\n\t\t# Set name from tag\n\t\tself.name = self._element.tag\n\n\t\t# Get attributes and add them to the attributes list\n\t\tfor attribute in self._element.attrib:\n\t\t\tself.attributes.append(attribute)\n\n\t\t# Get Children, add them to their respective lists\n\t\tchild: ET.Element\n\t\tfor child in self._element:\n\t\t\t# Determine if child is a SubNode or a Node\n\t\t\t# If child has children or attributes it is a Node\n\t\t\tif len(child) or len(child.attrib):\n\t\t\t\t# Add Node\n\t\t\t\tself.add_node(Node(child))\n\t\t\telse:\n\t\t\t\tself.add_subnode(SubNode(child))", "def convertNode(self, builder, typeName, data):\n\t\tif typeName not in self.nodeTypeMap:\n\t\t\traise Exception('Node type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.nodeTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)", "def xml_dict(cls, data):\n xml = \"\"\n related_links = data.pop(config.LINKS, {}).pop(\"related\", {})\n ordered_items = OrderedDict(sorted(data.items()))\n for k, v in ordered_items.items():\n if isinstance(v, datetime.datetime):\n v = date_to_str(v)\n elif isinstance(v, (datetime.time, datetime.date)):\n v = v.isoformat()\n if not isinstance(v, list):\n v = [v]\n for idx, value in enumerate(v):\n if isinstance(value, dict):\n links = cls.xml_add_links(value)\n xml += cls.xml_field_open(k, idx, related_links)\n xml += cls.xml_dict(value)\n xml += links\n xml += cls.xml_field_close(k)\n else:\n xml += cls.xml_field_open(k, idx, related_links)\n xml += \"%s\" % escape(value)\n xml += cls.xml_field_close(k)\n return xml", "def encode_dataitem(dfd, tree):\n assert type(dfd) is dict or type(dfd) is list\n for c in tree.getElementsByTagName('DataItemFormat'):\n for d in c.childNodes:\n if d.nodeName == 'Fixed':\n return encode_fixed(dfd, d)\n else:\n if d.nodeName == 'Variable':\n return encode_variable(dfd, d)\n else:\n if d.nodeName == 'Repetitive':\n return encode_repetitive(dfd, d)\n else:\n if d.nodeName == 'Compound':\n return encode_compound(dfd, d)", "def _xml_convert(self, element):\n\n children = 
list(element)\n\n if len(children) == 0:\n return self._type_convert(element.text)\n else:\n # if the fist child tag is list-item means all children are list-item\n if children[0].tag == \"list-item\":\n data = []\n for child in children:\n data.append(self._xml_convert(child))\n else:\n data = {}\n for child in children:\n data[child.tag] = self._xml_convert(child)\n\n return data", "def _named_members(self, get_members_fn, prefix='', recurse=True):\n memo = set()\n modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]\n for module_prefix, module in modules:\n members = get_members_fn(module)\n for k, v in members:\n if v is None or v in memo:\n continue\n memo.add(v)\n name = module_prefix + ('.' if module_prefix else '') + k\n # translate name to ori_name\n if name in self.node_name_map:\n name = self.node_name_map[name]\n yield name, v", "def toXML(self):\n return self._xmlpre+\"\\n\".join(map(lambda f:f.toXML(),self._items))+self._xmlpost", "def set_data(data, create_attrs=True, set_values=True, set_values_on_all=False, verbose=True):\n\n def set_value(node, attr, attr_data, verbose=False):\n \"\"\"Sets the value on specifed node from data \"\"\"\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)\n\n def add_attr(node, attr, attr_data, verbose=False):\n \"\"\"Actually add the attribbutes based on attr_dataDict\"\"\"\n\n parent = attr_data.get('parent')\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n # get parent and make sure it is a string\n if parent and type(parent) is list:\n parent = parent[0]\n\n # skip if the attr already exists\n if mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} already exists! 
Skipping..'.format(node, attr))\n return\n\n # add message attrs\n elif attr_type == 'message':\n mc.addAttr(node, ln=attr, at='message')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n # add compound attrs\n elif attr_type == 'compound':\n number_children = attr_data.get('number_children')\n\n try:\n if parent:\n mc.addAttr(node, ln=attr, at='compound', p=parent, k=keyable, number_children=number_children)\n else:\n mc.addAttr(node, ln=attr, at='compound', k=keyable, number_children=number_children)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add string attrs\n elif attr_type == 'string' :\n try:\n if parent:\n mc.addAttr(node, ln=attr, dt='string',p=parent)\n else:\n mc.addAttr(node, ln=attr, dt='string')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add enum attrs\n elif attr_type == 'enum':\n try:\n enum = attr_data.get('enum')\n default_value = attr_data.get('default_value')\n\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n\n elif attr_type == 'bool':\n try:\n default_value = attr_data.get('default_value') or 0\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n elif attr_type in ['float2', 'float3', 'double2', 'double3', 'short3', 'long2', 'long3']:\n try:\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n else:\n try:\n min_value = attr_data.get('min')\n max_value = attr_data.get('max')\n default_value = attr_data.get('default_value') or 0\n\n if parent:\n if min_value and max_value:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n if min_value is not None and max_value is not None:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n nodes = mc.ls(data.keys())\n\n # first create all compound and child attrs\n if not 
data:\n return\n\n for node in nodes:\n if verbose:\n print '\\n'\n\n node_data = data.get(node)\n if not node_data:\n continue\n\n node_data = node_data.get('data')\n ordered_attr_list = data.get(node).get('attr_order')\n\n # this is for only setting vcalues on newly created nodes\n # we doint want ot mess with whats already there.\n set_values_for = []\n\n # first create attrs\n if create_attrs:\n for attr in ordered_attr_list:\n attr_data = node_data.get(attr)\n result = add_attr(node, attr, attr_data, verbose=verbose)\n if result:\n set_values_for.append(attr)\n\n if set_values_on_all:\n set_values_for = ordered_attr_list\n\n # then set them\n for attr in set_values_for:\n attr_data = node_data.get(attr)\n set_value(node, attr, attr_data, verbose=verbose)", "def _loadData(self, data):\n Video._loadData(self, data)\n self.audioLanguage = data.attrib.get('audioLanguage', '')\n self.collections = self.findItems(data, media.Collection)\n self.guids = self.findItems(data, media.Guid)\n self.index = utils.cast(int, data.attrib.get('index'))\n self.key = self.key.replace('/children', '') # FIX_BUG_50\n self.labels = self.findItems(data, media.Label)\n self.leafCount = utils.cast(int, data.attrib.get('leafCount'))\n self.parentGuid = data.attrib.get('parentGuid')\n self.parentIndex = utils.cast(int, data.attrib.get('parentIndex'))\n self.parentKey = data.attrib.get('parentKey')\n self.parentRatingKey = utils.cast(int, data.attrib.get('parentRatingKey'))\n self.parentStudio = data.attrib.get('parentStudio')\n self.parentTheme = data.attrib.get('parentTheme')\n self.parentThumb = data.attrib.get('parentThumb')\n self.parentTitle = data.attrib.get('parentTitle')\n self.ratings = self.findItems(data, media.Rating)\n self.subtitleLanguage = data.attrib.get('audioLanguage', '')\n self.subtitleMode = utils.cast(int, data.attrib.get('subtitleMode', '-1'))\n self.viewedLeafCount = utils.cast(int, data.attrib.get('viewedLeafCount'))\n self.year = utils.cast(int, data.attrib.get('year'))", "def build_serializer(self):\n self._add_child_elements_recursive(self.get_root_element())", "def _build_tree(self, root, obj, declared_ns):\n\n if obj is None:\n return\n\n get_logger().debug('Building tree for %s (%s)', str(obj), root.name)\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n if not _attr_supports_version(attr, self.version):\n get_logger().debug('Skipping class attribute %s for not supported version %s',\n attr.name, self.version)\n continue\n\n child = getattr(obj, attr_name)\n if not _attr_element_content_serializable(attr, child):\n get_logger().debug('NOT Serializing Child Element %s (%s) because of its value',\n attr.name, attr_name)\n continue\n\n get_logger().debug('Serializing Child Element %s (%s)', attr.name, attr_name)\n self._serialize_object_to_node(root, attr.name, child, declared_ns, attr.kind)\n elif isinstance(attr, XmlAttributeProperty):\n if not _attr_supports_version(attr, self.version):\n get_logger().debug('Skipping class attribute %s for not supported version %s',\n attr.name, self.version)\n continue\n\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def serialize(self, root):", "def xml_add_items(cls, data):\n try:\n xml = \"\".join(cls.xml_item(item) for item in data[config.ITEMS])\n except Exception:\n xml = cls.xml_dict(data)\n return xml", "def serialize(self, root):\n def serializeHelper(node, 
values):\n if node:\n values.append(node.val)\n serializeHelper(node.left, values)\n serializeHelper(node.right, values)\n values = []\n serializeHelper(root, values)\n return ' '.join(map(str, values))", "def create_data_sample(self) -> DataNode:\n root = DataNode(\"Grandpa\")\n\n node1 = DataNode(\"Node1\", parent=root)\n node2 = DataNode(\"Node2\", parent=root)\n node3 = DataNode(\"Node3\", parent=root)\n\n child1_1 = DataNode(\"Child1_1\", parent=node1)\n child1_2 = DataNode(\"Child1_2\", parent=node1)\n\n child2_1 = DataNode(\"Child2_1\", parent=node2)\n child2_2 = DataNode(\"Child2_2\", parent=node2)\n child2_3 = DataNode(\"Child2_3\", parent=node2)\n\n child3_1 = DataNode(\"Child3_1\", parent=node3)\n\n grandchild1_1_1 = DataNode(\"Grandchild1_1_1\", parent=child1_1)\n grandchild1_1_2 = DataNode(\"Grandchild1_1_2\", parent=child1_1)\n\n grandchild1_2_1 = DataNode(\"Grandchild1_2_1\", parent=child1_2)\n\n grandchild2_1_1 = DataNode(\"Grandchild2_1_1\", parent=child2_1)\n grandchild2_1_2 = DataNode(\"Grandchild2_1_2\", parent=child2_1)\n grandchild2_1_3 = DataNode(\"Grandchild2_1_3\", parent=child2_1)\n\n grandchild2_2_1 = DataNode(\"Grandchild2_2_1\", parent=child2_2)\n grandchild2_2_2 = DataNode(\"Grandchild2_2_2\", parent=child2_2)\n\n grandchild2_3_1 = DataNode(\"Grandchild2_3_1\", parent=child2_3)\n grandchild2_3_2 = DataNode(\"Grandchild2_3_2\", parent=child2_3)\n grandchild2_3_3 = DataNode(\"Grandchild2_3_3\", parent=child2_3)\n grandchild2_3_4 = DataNode(\"Grandchild2_3_4\", parent=child2_3)\n\n grandchild3_1_1 = DataNode(\"Grandchild3_1_1\", parent=child3_1)\n grandchild3_1_2 = DataNode(\"Grandchild3_1_2\", parent=child3_1)\n grandchild3_1_3 = DataNode(\"Grandchild3_1_3\", parent=child3_1)\n grandchild3_1_4 = DataNode(\"Grandchild3_1_4\", parent=child3_1)\n grandchild3_1_5 = DataNode(\"Grandchild3_1_5\", parent=child3_1)\n\n return root", "def _node2xmlfields(self, noderecord):\n recordtag = noderecord.pop(\"BOTSID\")\n del noderecord[\"BOTSIDnr\"]\n BOTSCONTENT = noderecord.pop(\"BOTSCONTENT\", None)\n # ***collect from noderecord all entities and attributes***************************\n attributemarker = self.ta_info[\"attributemarker\"]\n attributedict = {} # is a dict of dicts\n for key, value in noderecord.items():\n if attributemarker in key:\n field, attribute = key.split(attributemarker, 1)\n if not field in attributedict:\n attributedict[field] = {}\n attributedict[field][attribute] = value\n else:\n if not key in attributedict:\n attributedict[key] = {}\n # ***generate the xml-record-entity***************************\n xmlrecord = ET.Element(\n recordtag, attributedict.pop(recordtag, {})\n ) # pop from attributedict->do not use later\n # ***add BOTSCONTENT as the content of the xml-record-entity\n xmlrecord.text = BOTSCONTENT\n # ***generate the xml-field-entities within the xml-record-entity***************************\n for key in sorted(attributedict.keys()): # sorted: predictable output\n ET.SubElement(xmlrecord, key, attributedict[key]).text = noderecord.get(key)\n return xmlrecord", "def serialize(self, root):\n if not root:\n return \"\";\n res = str(root.val)\n if len(root.children) != 0:\n children_res = []\n for child in root.children:\n children_res.append(self.serialize(child))\n res = res + \"[\" + \" \".join(children_res) + \"]\"\n #print(res)\n return res" ]
[ "0.55837137", "0.55511117", "0.55470467", "0.5507946", "0.5457745", "0.5439807", "0.5430423", "0.5406567", "0.53041154", "0.52765614", "0.52231044", "0.511099", "0.5077055", "0.50718933", "0.50695", "0.5063672", "0.504861", "0.50350535", "0.5003575", "0.49517578", "0.49235386", "0.49187833", "0.49164274", "0.49142194", "0.4909659", "0.4896826", "0.48810977", "0.4877568", "0.484276", "0.483001" ]
0.6046614
0
Serialize a dictionary into the specified content type.
def serialize(self, data, content_type): return self._get_serialize_handler(content_type).serialize(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self, dictionary):\n dictionary['type'] = self.type_code", "def serialize_dict(container: Dict) -> Dict:\n for key, value in container.items():\n container[key] = serialize_obj(value)\n return container", "def serialize_dict(container: Dict) -> Dict:\n for key, value in container.items():\n container[key] = serialize_obj(value)\n return container", "def as_dict(self, key_type=None, value_type=None):\n\n if key_type is None and value_type is None:\n return self._contents.copy()\n\n def any_type(val): return val\n\n if key_type is None: key_type = any_type\n if value_type is None: value_type = any_type\n\n retval = OrderedDict()\n for key, val in iteritems(self._contents):\n try:\n key = key_type(key)\n except ValueError:\n message = \"Key <{}> = '{}' could not be converted to {}\".format(\n self.namespace, key, key_type\n )\n raise ConfigTypeError(message)\n\n try:\n val = val.as_type(value_type)\n except ValueError:\n message = \"Value <{}.{}> = '{}' could not be converted to {}\".format(\n self.namespace, key, val, key_type\n )\n raise ConfigTypeError(message)\n retval[key] = val\n return retval", "def serialize(self) -> dict:\n return {\n 'type': self.type,\n **self.args,\n }", "def encode(cls, dictionary: Dict[str, Any]) -> bytes:\n if not isinstance(dictionary, dict):\n raise TypeError( # pragma: nocover\n \"dictionary must be of dict type, got type {}\".format(type(dictionary))\n )\n patched_dict = copy.deepcopy(dictionary)\n cls._patch_dict(patched_dict)\n pstruct = Struct()\n pstruct.update(patched_dict) # pylint: disable=no-member\n return pstruct.SerializeToString()", "def _asdict(self) -> Dict[Text, Any]:\n return self.as_base_types()", "def create_dict(self, dict_type, upload_id, download_id,\n pub_user, module_supplier_id):\n response = self.do_request(\n self.base_url +\n \"/oasis/create\" + self.types[dict_type] + \"/\" +\n pub_user + \"/\" +\n str(module_supplier_id) + \"/\" +\n str(upload_id) + \"/\" +\n str(download_id) + \"/\"\n )\n return response", "def test_base_types(self):\r\n from adrest.utils.serializer import BaseSerializer\r\n try:\r\n from collections import OrderedDict\r\n except ImportError:\r\n from ordereddict import OrderedDict # nolint\r\n\r\n from datetime import datetime\r\n from decimal import Decimal\r\n\r\n serializer = BaseSerializer()\r\n data = dict(\r\n string_='test',\r\n unicode_=unicode('test'),\r\n datetime_=datetime(2007, 01, 01),\r\n odict_=OrderedDict(value=1),\r\n dict_=dict(\r\n list_=[1, 2.35, Decimal(3), False]\r\n )\r\n )\r\n\r\n value = serializer.serialize(data)\r\n self.assertEqual(value, dict(\r\n string_=u'test',\r\n unicode_=u'test',\r\n datetime_='2007-01-01T00:00:00',\r\n odict_=dict(value=1),\r\n dict_=dict(\r\n list_=[1, 2.35, 3.0, False]\r\n )\r\n ))", "def serialize_dict(d):\n txt = '{'\n for k in d:\n txt += f'\"{k}\":'\n if isinstance(d[k], dict):\n txt += serialize_dict(d[k])\n if isinstance(d[k], str):\n txt += serialize_string(d[k])\n if isinstance(d[k], int):\n txt += serialize_number(d[k])\n txt += ','\n txt += '}'\n return txt", "def custom_encode(obj):\n if isinstance(obj, DictionaryMethods):\n key = '__Dictionary__'\n return {key: [list(obj), obj.alpha, obj.pat, obj.pat_args,\n obj.auto_fields]}\n elif isinstance(obj, Entry):\n return obj.data\n else:\n raise TypeError(\"obj {!r} of type {}\".format(obj, type(obj)))", "def is_typed_dict(self) -> bool:\n return True", "def serialize_dict(dict_raw):\n dict_serialized = {}\n for (key, value) in dict_raw.items():\n if isinstance(value, list):\n 
dict_serialized[unidecode.unidecode(str(key))] = serialize_list(value)\n elif isinstance(value, dict):\n dict_serialized[unidecode.unidecode(str(key))] = serialize_dict(value)\n else:\n dict_serialized[unidecode.unidecode(str(key))] = unidecode.unidecode(str(value))\n return dict_serialized", "def serialize_dict(dict_raw):\n dict_serialized = {}\n for (key, value) in dict_raw.items():\n if isinstance(value, list):\n dict_serialized[unidecode.unidecode(str(key))] = serialize_list(value)\n elif isinstance(value, dict):\n dict_serialized[unidecode.unidecode(str(key))] = serialize_dict(value)\n else:\n dict_serialized[unidecode.unidecode(str(key))] = unidecode.unidecode(str(value))\n return dict_serialized", "def encode_metadata_dict(metadict):\n return _json.dumps(metadict, separators=(',', ':')).encode('ascii')", "def is_dictionary_type(self):\n raise exceptions.NotImplementedError()", "def serialize(self, request, content_type, default_serializers=None):\n\n if self.serializer:\n serializer = self.serializer\n else:\n _mtype, _serializer = self.get_serializer(content_type,\n default_serializers)\n serializer = _serializer()\n\n response = webob.Response()\n response.status_int = self.code\n for hdr, value in self._headers.items():\n response.headers[hdr] = str(value)\n response.headers['Content-Type'] = content_type\n if self.obj is not None:\n response.body = serializer.serialize(self.obj)\n\n return response", "def json(self) -> Dict[str, Union[List, Dict, str, int, float]]:", "def serialize(self, data):\r\n if data is None:\r\n return None\r\n elif type(data) is dict:\r\n return serializer.Serializer(\r\n self.get_attr_metadata()).serialize(data, self.content_type())\r\n else:\r\n raise Exception(_(\"Unable to serialize object of type = '%s'\") %\r\n type(data))", "def _check_keys(self, dict, filetype):\n self.filetype = filetype\n for key in dict:\n if isinstance(dict[key], scipy.io.matlab.mio5_params.mat_struct):\n dict[key] = self._todict(dict[key], self.filetype)\n return dict", "def transform_python(self, value: Entity) -> Dict:\n if self._schema_type:\n _schema_type: type = self._schema_type\n # noinspection PyTypeChecker\n _schema: Schema = _schema_type()\n _dict = _schema.dump(value)\n return _dict\n\n return dict(value)", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def _is_valid_dict(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:dict\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 2:\n return False\n\n sub_type_1 = sub_types[0]\n sub_type_2 = sub_types[1]\n return _is_valid_pt(sub_type_1) and _is_valid_pt(sub_type_2)", "def schema_as_json(content_schema: Dict[str, n.SerializableType]) -> n.SerializableType:\n if \"properties\" in content_schema:\n current: Dict[str, Any] = {}\n properties = content_schema[\"properties\"]\n assert isinstance(properties, Dict)\n for prop, options in properties.items():\n if \"type\" not in options:\n current[prop] = \"Any\"\n elif options[\"type\"] == \"object\":\n current[prop] = schema_as_json(options)\n elif options[\"type\"] == \"array\":\n current[prop] = [schema_as_json(options[\"items\"])]\n else:\n current[prop] = options[\"type\"]\n\n return current\n\n if \"items\" in content_schema:\n assert isinstance(content_schema[\"items\"], Dict)\n return 
[schema_as_json(content_schema[\"items\"])]\n\n return content_schema[\"type\"]", "def asdict():\n pass", "def write_dictionary(args, dictio):\n if not args.dictfile.endswith(\".file\"):\n args.dictfile += \".file\"\n with open(args.dictfile, \"wb\") as f:\n dump(dictio, f, protocol=HIGHEST_PROTOCOL)", "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'type') and self.type is not None:\n _dict['type'] = self.type\n return _dict", "def to_dict(self) -> Dict:\n _dict = {}\n if hasattr(self, 'type') and self.type is not None:\n _dict['type'] = self.type\n return _dict", "def _encode_dictionary(data, name=\"Second\", sub=False):\n\n if sub:\n root = ET.Element(\"Field\", {\"Name\": f'{name}', \"Type\": \"elsystem.collections.dictionary\"})\n else: \n root = ET.Element(\"elsystem.collections.dictionary\")\n\n items = ET.SubElement(root, 'Field', {'Name': 'Items', 'Type': 'elsystem.collections.vector'})\n\n index = 0\n\n for key, val in data.items():\n\n pair = ET.SubElement(items, 'Field', {'Name': f'E{index}', 'Type': 'elsystem.collections.pair'})\n \n if type(val) == dict:\n ET.SubElement(pair, 'Field', {'Name': 'First', 'Value': _encode_value(key)}) \n sub_dict = _encode_dictionary(data=val, name=\"Second\", sub=True)\n pair.append(sub_dict)\n elif type(val) == list:\n ET.SubElement(pair, 'Field', {'Name': 'First', 'Value': _encode_value(key)}) \n sub_vec = _encode_list(data=val, name=F'E{index}', sub=True)\n pair.append(sub_vec)\n else:\n ET.SubElement(pair, 'Field', {'Name': 'First', 'Value': _encode_value(key)}) \n ET.SubElement(pair, 'Field', {'Name': 'Second', 'Value': _encode_value(val)}) \n\n index += 1\n\n ET.SubElement(items, 'Field', {'Name': 'count', 'Value': _encode_value(index)})\n\n if sub:\n return root \n else:\n return ET.tostring(root)", "def render_dict(dict):\n\t\treturn str.encode(str(dict))" ]
[ "0.6238056", "0.57195693", "0.57195693", "0.56067234", "0.5586985", "0.5327035", "0.5265228", "0.521467", "0.51638", "0.5133741", "0.5116739", "0.5113826", "0.5064935", "0.5064935", "0.5040795", "0.5020376", "0.49975267", "0.498446", "0.49824035", "0.49813008", "0.49763197", "0.49461874", "0.4942085", "0.49037915", "0.48650756", "0.48640084", "0.48629108", "0.48629108", "0.48571616", "0.48540705" ]
0.6142456
1
Deserialize a string to a dictionary. The string must be in the format of a supported MIME type.
def deserialize(self, datastring, content_type): return self.get_deserialize_handler(content_type).deserialize( datastring)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mmtf_bytes_to_mmtf_dict(bytestring):\n\n raw = msgpack.unpackb(bytestring)\n return decode_dict(raw)", "def parse_bytes_to_dict(bytes_to_parse):\n return ast.literal_eval(bytes_to_parse.decode(\"utf-8\"))", "def json_loads(s: Union[bytes, str]) -> Dict[str, Any]:\n return json.loads(ensure_text(s, \"utf-8\"))", "def decode(cls, buffer: bytes) -> Dict[str, Any]:\n pstruct = Struct()\n pstruct.ParseFromString(buffer)\n dictionary = dict(pstruct)\n cls._patch_dict_restore(dictionary)\n return dictionary", "def parse_from_string(self, file_content: str):\n self._split_to_tokens(file_content)\n if not self._convert_tokens_to_dict():\n log.error('Failed to generate dictionary representation of file.')\n return None\n return self._result", "def yaml_str_to_schema_dict(yaml_str: str) -> dict:\n schema = load(yaml_str, get_type_schema_yaml_validator())\n revalidate_typeschema(schema)\n return schema.data", "def read_string(self, string, **kwds):\n self._dict.update(json.loads(string))", "def from_string(string):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n return json.loads(string)", "def str2dict(string):\n res_dict = {}\n for keyvalue in string.split(','):\n (key, value) = keyvalue.split('=', 1)\n res_dict[key] = value\n return res_dict", "async def parse(self, raw: str) -> dict:", "def parse_content_type(value): # pylint: disable = W0621\n try:\n if isinstance(value, unicode):\n value.encode('ascii')\n else:\n value.decode('ascii')\n except (AttributeError, UnicodeError):\n return None\n\n match = typere.match(value)\n if not match:\n return None\n\n parsed = (match.group(1).lower(), {})\n match = match.group(2)\n if match:\n for key, val in pairre.findall(match):\n if val[:1] == '\"':\n val = stripre.sub(r'', val[1:-1]).replace(r'\\\"', '\"')\n parsed[1].setdefault(key.lower(), []).append(val)\n\n return parsed", "def deserialize(self, val_str):\n\n serializer = self.get_serializer()\n obj_dict = serializer.deserialize(val_str)\n\n return obj_dict", "def message_to_dict(message):\n message_dict = {}\n if isinstance(message, str):\n tmp = re.sub(\"[{}\\\"]\", '', message).split(',')\n for string in tmp:\n var = string.split(':')\n message_dict[var[0]] = var[1]\n return message_dict", "def str2dic(self, string):\n dic = {}\n list0=string.split(\"&\")\n for i in list0:\n list2 = i.split(\"=\")\n dic[list2[0]] = list2[1]\n return dic", "def json_loads(self, string: str) -> object:\n return json.loads(string)", "def parse_from_str(self, config_str):\n if not config_str:\n return {}\n config_dict = {}\n try:\n for kv_pair in config_str.split(','):\n if not kv_pair: # skip empty string\n continue\n k, v = kv_pair.split('=')\n config_dict[k.strip()] = eval_str_fn(v.strip())\n return config_dict\n except ValueError:\n raise ValueError('Invalid config_str: {}'.format(config_str))", "def _decode_dict(data: BencodedString) -> dict:\n result_dict = {}\n data.del_prefix(1)\n\n while True:\n if data.bytes:\n if data.bytes[0] != END_MARKER:\n key = _decode(data)\n value = _decode(data)\n result_dict[key] = value\n else:\n data.del_prefix(1)\n break\n else:\n raise ValueError(\n \"Cannot decode a dictionary, reached end of the bencoded \"\n \"string before the end marker was found. 
Most likely the \"\n \"bencoded string is incomplete or incorrect.\"\n )\n\n return result_dict", "def makeDict(self, s):\n out = {}\n entries = s.split(self.dataDelimiterEntry)\n for e in entries:\n if e == \"\":\n continue\n c = e.split(self.dataDelimiterKey)\n out[c[0]] = c[1]\n return out", "def decodemeta(data):\n d = {}\n for l in data.split('\\0'):\n if l:\n key, value = l.split(':')\n d[key] = value\n return d", "def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})", "def parse_mimetype(mimetype: str) -> MimeType:\n if not mimetype:\n return MimeType(\n type=\"\", subtype=\"\", suffix=\"\", parameters=MultiDictProxy(MultiDict())\n )\n\n parts = mimetype.split(\";\")\n params: MultiDict[str] = MultiDict()\n for item in parts[1:]:\n if not item:\n continue\n key, _, value = item.partition(\"=\")\n params.add(key.lower().strip(), value.strip(' \"'))\n\n fulltype = parts[0].strip().lower()\n if fulltype == \"*\":\n fulltype = \"*/*\"\n\n mtype, _, stype = fulltype.partition(\"/\")\n stype, _, suffix = stype.partition(\"+\")\n\n return MimeType(\n type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params)\n )", "def from_json_string(my_str):\n return loads(my_str)", "def parse_prefs_file(prefs_string):\r\n try:\r\n prefs = dict(eval(prefs_string))\r\n except TypeError:\r\n raise QiimeParseError(\r\n \"Invalid prefs file. Prefs file must contain a valid prefs dictionary.\")\r\n return prefs", "def parse_string_dict(dict_as_string):\n new_dict = ast.literal_eval(dict_as_string[1:-1])\n new_dict = {key: parse_string(val) for key, val in new_dict.items()}\n return new_dict", "def from_json_string(my_str):\n import json\n return json.loads(my_str)", "def __parse_message_as(msg_type: type, msg_str: str) -> Any:\n # parse the message\n msg_dict = json.loads(msg_str)\n\n # the type specified in the message needs to match\n # the type we are parsing as\n assert msg_dict[MSG_TYPE_NAME] == msg_type.__name__, \\\n f\"Message type did not match the parsing type,\" \\\n f\"parsing the message as type {msg_type.__name__},\" \\\n f\"but get a message of type {msg_dict[MSG_TYPE_NAME]}\"\n\n # remove the message type information, and create the object\n del msg_dict[MSG_TYPE_NAME]\n return msg_type(**msg_dict)", "def parse_email(msg):\n eml_dict = {}\n psr = Parser()\n parsed_eml = psr.parsestr(msg)\n eml_dict.update(parsed_eml)\n eml_dict['Body'] = parsed_eml.get_payload()\n return eml_dict", "def parse_mime(self, mtype):\n parts = mtype.split(';')\n params = OrderedDict()\n\n # Split parameters and convert numeric values to a Decimal object.\n for k, v in [param.split('=', 1) for param in parts[1:]]:\n k = k.strip().lower()\n v = v.strip().strip('\\'\"')\n\n if self._parm_val_lower:\n v = v.lower()\n\n try:\n v = Decimal(v)\n except InvalidOperation:\n if k == 'q':\n v = Decimal(\"1.0\")\n\n params[k] = v\n\n # Add/fix quality values.\n quality = params.get('q')\n\n if ('q' not in params\n or quality > Decimal(\"1.0\")\n or quality < Decimal(\"0.0\")):\n params['q'] = Decimal(\"1.0\")\n\n full_type = parts[0].strip().lower()\n\n # Fix non-standard single asterisk.\n if full_type == '*':\n full_type = '*/*'\n\n type, sep, subtype = full_type.partition('/')\n\n if '+' in subtype:\n idx = 
subtype.rfind('+')\n suffix = subtype[idx+1:].strip()\n subtype = subtype[:idx]\n else:\n suffix = ''\n\n return type.strip(), subtype.strip(), suffix, params", "def parse_data(self):\n data = {}\n content = self.headers.get('content-type', None)\n if content:\n ctype, pdict = parse_header(content)\n if ctype == 'application/json':\n length = int(self.headers['content-length'])\n data = json.loads(self.bytes_to_str(self.rfile.read(length)))\n return data", "def make_data_dict_from_str(self,reg_exp,data_str):\n data_list=reg_exp.split(data_str)\n data_list.pop(0)\n data_dict=dict(zip(data_list[0::2],data_list[1::2]))\n # get rid of \\n at the end of the strings\n reg_exp_strip_n=re.compile(r'\\n$')\n for key in data_dict.keys():\n data_dict[key]=reg_exp_strip_n.sub('',data_dict[key])\n return data_dict" ]
[ "0.63770974", "0.6286723", "0.6270054", "0.62124544", "0.5952388", "0.59262204", "0.59089375", "0.59001184", "0.5857306", "0.5854179", "0.583345", "0.58254915", "0.5811731", "0.5762951", "0.5674565", "0.5623781", "0.56214863", "0.5615988", "0.5598508", "0.5587685", "0.55840296", "0.55502707", "0.5548566", "0.5508894", "0.55053353", "0.55014515", "0.5494987", "0.54935575", "0.5469368", "0.5436816" ]
0.632472
1
Returns the first environment variable set. If none are nonempty, defaults to '' or keyword arg default.
def env(*vars, **kwargs): for v in vars: value = os.environ.get(v) if value: return value return kwargs.get('default', '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def env(*vars, **kwargs):\n for v in vars:\n value = os.environ.get(v, None)\n if value:\n return value\n return kwargs.get('default', '')", "def env(*vars, **kwargs):\n for v in vars:\n value = os.environ.get(v, None)\n if value:\n return value\n return kwargs.get('default', '')", "def env(*vars, **kwargs):\n for v in vars:\n value = os.environ.get(v)\n if value:\n return value\n return kwargs.get('default', '')", "def env(*_vars, **kwargs):\r\n for v in _vars:\r\n value = os.environ.get(v, None)\r\n if value:\r\n return value\r\n return kwargs.get('default', '')", "def env(*_vars, **kwargs):\n for v in _vars:\n value = os.environ.get(v, None)\n if value:\n return value\n return kwargs.get('default', '')", "def _default_getter(environ, metadata, prefix, name):\n ce = metadata[CNF_KEY]\n var = ce.name if ce.name is not None else \"_\".join((*prefix, name)).upper()\n log.debug(\"looking for env var '%s'.\", var)\n try:\n return environ[var]\n except KeyError:\n raise MissingEnvValueError(var) from None", "def get_environment_var(env_name, default_value):\n if env_name in os.environ:\n return os.environ[env_name]\n else:\n return default_value", "def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)", "def get_env_variable(var_name, default_value=None):\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = \"Set the %s environment variable\" % var_name\n print(error_msg)\n\n return default_value", "def getenv(name, default=None):\n return os.environ.get(name, default)", "def parameter_environment_or_default(parameter, env_var: str, default):\n if parameter is not None:\n return parameter\n if env_var in os.environ:\n return os.environ[env_var]\n return default", "def _get_env(key, default=None, coerce=lambda x: x, required=False):\n try:\n value = os.environ[key]\n except KeyError:\n if required is True:\n raise RequiredSettingMissingError(key)\n else:\n return default\n\n try:\n return coerce(value)\n except Exception:\n raise CoercionError(key, value, coerce)", "def get_env(env_name: str, default: Optional[str] = None) -> str:\n if env_name not in os.environ:\n if default is None:\n raise KeyError(f\"{env_name} not defined and no default value is present!\")\n return default\n\n env_value: str = os.environ[env_name]\n if not env_value:\n if default is None:\n raise ValueError(\n f\"{env_name} has yet to be configured and no default value is present!\"\n )\n return default\n\n return env_value", "def GetEnvironFallback(var_list, default):\n for var in var_list:\n if var in os.environ:\n return os.environ[var]\n return default", "def env(key, default=None, required=False):\n try:\n value = os.environ[key]\n return ast.literal_eval(value)\n except (SyntaxError, ValueError):\n return value\n except KeyError:\n if default or not required:\n return default\n raise ImproperlyConfigured(\n \"Missing required environment variable '%s'\" % key)", "def get_env_variable(var_name, default_value=None):\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = 'Set the {} environment variable'.format(var_name)\n if default_value is not None:\n return default_value\n raise ValueError(error_msg)", "def get_env_setting(setting, default=None):\n try:\n return os.environ[setting]\n except KeyError:\n if default is not None:\n return default\n else:\n error_msg = ('The {} env variable was not found '\n 'and no default was set!').format(setting)\n raise ImproperlyConfigured(error_msg)", "def get_env_variable(var_name, default=None):\n if 
default is not None:\n return _fix_booleans(environ.get(var_name, default))\n try:\n return _fix_booleans(environ[var_name])\n except KeyError:\n error_msg = \"Please set the %s environment variable\" % var_name\n raise KeyError(error_msg)", "def get(self, key, default=None):\n value = os.environ.get(key)\n\n if value:\n self.logging.info(\"Got %s from environment.\" % key)\n self.logging.debug(value)\n return_val = value\n elif key in self._config.keys():\n self.logging.info(\"Got %s from config file.\" % key)\n self.logging.debug(value)\n return_val = self._config[key]\n else:\n return_val = default\n return return_val", "def maybe_environ(key):\n try:\n return os.environ[key]\n except KeyError:\n return \"\"", "def getenv_string(setting, default=''):\n return os.environ.get(setting, default)", "def get_envint(key, *default):\n return get_env(key, *default, coerce=_int)", "def get_default(name, value):\n return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)", "def env_var(key, default=None):\n val = os.environ.get(key, default)\n if val == 'True':\n val = True\n elif val == 'False':\n val = False\n return val", "def env(key: str) -> Optional[Any]:\n return os.getenv(key)", "def env_get_var_value(var_name, allow_missing=False):\n if allow_missing:\n if var_name not in os.environ.keys():\n return None\n assert var_name in os.environ.keys(), \"Please supply %s in environment\" % var_name\n return os.environ[var_name]", "def env(key, default=None):\n val = os.getenv(key, default)\n\n if val == 'True':\n val = True\n elif val == 'False':\n val = False\n return val", "def get_env_setting(setting, default=None):\n try:\n return environ[setting]\n except KeyError:\n if default:\n return default\n else:\n error_msg = \"Set the %s env variable\" % setting\n raise StandardError(error_msg)", "def get_env(key, *default, **kwargs):\n assert len(default) in (0, 1), \"Too many args supplied.\"\n func = kwargs.get('coerce', lambda x: x)\n required = len(default) == 0\n default = default[0] if not required else None\n return _get_env(key, default=default, coerce=func, required=required)", "def get_required_env_variable(var_name):\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = 'Set the {0} environment variable'.format(var_name)\n raise ImproperlyConfigured(error_msg)" ]
[ "0.7659608", "0.7659608", "0.765618", "0.754722", "0.75204015", "0.70700836", "0.704792", "0.6990025", "0.69792265", "0.69578755", "0.6949888", "0.68441415", "0.6825165", "0.6794135", "0.6680647", "0.66783625", "0.66763157", "0.6639027", "0.6628669", "0.66113937", "0.6609096", "0.65427977", "0.651565", "0.6494282", "0.6493776", "0.6473926", "0.64734125", "0.64475405", "0.64446664", "0.6441458" ]
0.76820445
0
Returns the client class for the requested API version
def get_client_class(api_name, version, version_map): try: client_path = version_map[str(version)] except (KeyError, ValueError): msg = _("Invalid %(api_name)s client version '%(version)s'. must be " "one of: %(map_keys)s") msg = msg % {'api_name': api_name, 'version': version, 'map_keys': ', '.join(version_map.keys())} raise exceptions.UnsupportedVersion(msg) return import_class(client_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def get(host, port=None, version=None):\n port = 8081 if port is None else port\n version = \"v1\" if version is None else version\n\n if version not in VERSIONS.keys():\n raise RestException(f\"Unknown REST API version: {version}\")\n api_client_cls = VERSIONS[version]\n return api_client_cls(host=host, port=port)", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)", "def get_api_version(self):\n major, minor, patch = self.client.config['api_version']\n return '%s.%s.%s' % (major, minor, patch)", "def test_get_api_v1_client(self):\n\n client = get_api_v1_client()\n self.assertEqual(type(client), Client)", "def get_client_version(self):\n return self.__aceQLHttpApi.get_client_version()", "def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version", "def get_version(self):\n return self.api_version", "def get_api_version(self):\n from webapi import VERSION\n return '.'.join(map(str, VERSION))", "def get_api_ironic_client(get_session):\n def _get_api_ironic_client(version, is_api):\n if version == '1':\n if is_api:\n return api_clients.IronicApiClientV1(session=get_session())\n else:\n return client_v1.get_client(api_version=version,\n session=get_session())\n\n raise ValueError(\"Unexpected ironic version: {!r}\".format(version))\n\n return _get_api_ironic_client", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def get_api_version(session: \"Session\") -> str:\n component_versions = get_component_versions(session)\n return str(component_versions.get(CoordConsts.KEY_API_VERSION, \"2.0.0\"))", "def get_api_version(self):\n return self.connector.request('GET', '/app/webapiVersion')", "def api_client() -> APIClient:\n return APIClient()", "def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)", "def ironic_client_v1(get_api_ironic_client):\n return get_api_ironic_client(version='1', is_api=False)", "def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION", "def get_vc3_client():\n c = SafeConfigParser()\n c.readfp(open(app.config['VC3_CLIENT_CONFIG']))\n\n try:\n client_api = client.VC3ClientAPI(c)\n return client_api\n except 
Exception as e:\n app.logger.error(\"Couldn't get vc3 client: {0}\".format(e))\n raise", "def api_version(self):\n\n\t\treturn self._api_version", "def api_client() -> APIClient:\n\n return APIClient()", "def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)", "def get_api(self):\n return self.api", "def api(self):\n res = self.client.call('/', 'GET', data='')\n self.logger.debug('Get openstack identity api versions: %s' % truncate(res))\n return res[0]", "def get_version(self):\n return self.__make_api_call('get/version')", "def get(self, version):\n version_found = False\n api_spec = self._create_api_spec(version)\n for base_api in current_app.appbuilder.baseviews:\n if isinstance(base_api, BaseApi) and base_api.version == version:\n base_api.add_api_spec(api_spec)\n version_found = True\n if version_found:\n return self.response(200, **api_spec.to_dict())\n else:\n return self.response_404()", "def get_klass(response):\n if not RESOURCE_MAP:\n load_resource_map()\n klass_name = response.get('resource')\n klass = RESOURCE_MAP.get(klass_name) or resource_class\n\n # provide api_client only for resource classes\n return klass(api_client=api_client, data=response) \\\n if klass else APIObject(data=response)", "def client_version(self) -> str:\n return pulumi.get(self, \"client_version\")", "def _create_client_impl(self, api_version):\n if api_version == v7_2_VERSION:\n from .v7_2 import KeyVaultClient as ImplClient\n else:\n raise NotImplementedError(\"APIVersion {} is not available\".format(api_version))\n\n impl = ImplClient(credentials=self._credentials)\n impl.config = self.config\n\n # if __enter__ has previously been called and the impl client has __enter__ defined we need to call it\n if self._entered and hasattr(impl, '__enter__'):\n impl.__enter__()\n\n self._client_impls[api_version] = impl\n return impl", "def api_version(self):\n\n return self._api_version" ]
[ "0.7603729", "0.69440734", "0.6819133", "0.6434926", "0.6426967", "0.64204526", "0.6384498", "0.633332", "0.63202596", "0.62628543", "0.62604326", "0.6218951", "0.62035966", "0.61787295", "0.61738944", "0.6160897", "0.6139946", "0.6088289", "0.60710144", "0.60686547", "0.60569715", "0.6039539", "0.60169494", "0.6002307", "0.5986078", "0.5984285", "0.5952152", "0.59457314", "0.5938394", "0.58893657" ]
0.8210274
0
Return a tuple containing the item properties.
def get_item_properties(item, fields, mixed_case_fields=[], formatters={}): row = [] for field in fields: if field in formatters: row.append(formatters[field](item)) else: if field in mixed_case_fields: field_name = field.replace(' ', '_') else: field_name = field.lower().replace(' ', '_') if not hasattr(item, field_name) and isinstance(item, dict): data = item[field_name] else: data = getattr(item, field_name, '') if data is None: data = '' row.append(data) return tuple(row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def items(self) -> Tuple[Item]:\n return tuple(self.__items)", "def items(self) -> tuple[tuple[Any, Any], ...]: # type: ignore\n return tuple(zip(self.keys(), self.values()))", "def getProperties():", "def items(self) -> tuple[tuple[Hashable, Any], ...]:\n return tuple(zip(self.keys(), self.values()))", "def get_properties():", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def get_info_in_tuple(self):\r\n return self.key, self.value, self.get_color(), self.size_tree", "def properties(self):\n return self.properties_with_uid[1:]", "def properties(self):\n return self._props", "def items(self) -> typing.Tuple[tuple]:\n\n return (\n (LEFT_PES, self.left_pes),\n (RIGHT_PES, self.right_pes),\n (LEFT_MANUS, self.left_manus),\n (RIGHT_MANUS, self.right_manus)\n )", "def properties(self) -> List[ProductionFlowItemProperty]:\n return self._properties", "def props(self):\n return self._props", "def props(self):\n return self._props", "def get_properties(self):\n return self.properties", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def items(self):\r\n return [(k, self[k]) for k in self]", "def get_properties(self):\n return self.properties", "def items(self):\n return [(k, self[k]) for k in self.keys()]", "def make_item_tuple(self, item):\r\n filtered_item = self.filter(item)\r\n lst = [filtered_item._fullname]\r\n for col in self.sort_cols:\r\n #take the property of the original \r\n attr = getattr(item, col)\r\n #convert dates to epochs to take less space\r\n if isinstance(attr, datetime):\r\n attr = epoch_seconds(attr)\r\n lst.append(attr)\r\n return tuple(lst)", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def getProperties(self):\n return self.properties", "def get(self) -> tuple:", "def props(self):\n return self.wc_status.attrib['props']", "def items(self):\n return [ (x, self[x]) for x in self ]", "def properties_get(self):\n return self._get('properties')", "def _tp__get_typed_properties(self):\n try:\n return tuple(getattr(self, p) for p in self._tp__typed_properties)\n except AttributeError:\n raise NotImplementedError", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def as_tuple(self):\n return self.value, self.name" ]
[ "0.7156068", "0.6725318", "0.6662332", "0.6658762", "0.66519535", "0.6637251", "0.6589536", "0.65703964", "0.65554535", "0.64712626", "0.6470728", "0.6409408", "0.6409408", "0.6399682", "0.63823044", "0.63823044", "0.6360324", "0.63506", "0.63309497", "0.6298649", "0.62954336", "0.6250145", "0.620062", "0.61827296", "0.61685044", "0.6139938", "0.6124508", "0.6119961", "0.6119961", "0.6101034" ]
0.6922289
1
Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's install() function. The main difference from gettext.install() is that we allow overriding the default localedir (e.g. /usr/share/locale) using a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR).
def install(domain, lazy=False): if lazy: # NOTE(mrodden): Lazy gettext functionality. # # The following introduces a deferred way to do translations on # messages in OpenStack. We override the standard _() function # and % (format string) operation to build Message objects that can # later be translated when we have more information. # # Also included below is an example LocaleHandler that translates # Messages to an associated locale, effectively allowing many logs, # each with their own locale. def _lazy_gettext(msg): """Create and return a Message object. Lazy gettext function for a given domain, it is a factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.) Message encapsulates a string so that we can translate it later when needed. """ return Message(msg, domain) import __builtin__ __builtin__.__dict__['_'] = _lazy_gettext else: localedir = '%s_LOCALEDIR' % domain.upper() gettext.install(domain, localedir=os.environ.get(localedir), unicode=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __initializeLocale(self):\n langdomain = 'tortugaStrings'\n\n # Locate the Internationalization stuff\n localedir = '../share/locale' \\\n if os.path.exists('../share/locale') else \\\n os.path.join(self._cm.getRoot(), 'share/locale')\n\n gettext.install(langdomain, localedir)", "def add_support_for_localization():\n path = os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)\n possible_topdir = os.path.normpath(path)\n if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):\n sys.path.insert(0, possible_topdir)\n\n gettext.install('nova', unicode=1)", "def install_translations(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n if where == 'local':\n # if we are local, we also generate new po files\n with cd('apps/dasa/'):\n run('../../bin/django makemessages -l id')\n run('../../bin/django makemessages -l en')\n run('../../bin/django compilemessages')\n with cd('project'):\n# run('../bin/django makemessages -l id')\n run('../bin/django makemessages -l en')\n run('../bin/django compilemessages')\n else: # otherwise, we just compile\n run('git pull')\n with cd('apps/dasa/'):\n run('../../bin/django compilemessages')\n with cd('project'):\n run('../bin/django compilemessages')\n restart(where)", "def lang_init():\n _locale, _encoding = locale.getdefaultlocale() # Default system values\n path = os.path.join(os.path.dirname(sys.argv[0]), 'localization/lang')\n if os.path.exists(path):\n lang = gettext.translation('UnrulyPuzzlePython', path, [_locale],\n fallback=True)\n else:\n lang = gettext.translation('UnrulyPuzzlePython', path,\n fallback=True)\n return lang.gettext", "def __init__(self):\n locale_dir = resource_filename(__name__, 'locale')\n add_domain(self.env.path, locale_dir)", "def add_localizer(event):\r\n request = event.request\r\n localizer = get_localizer(request)\r\n def auto_translate(*args, **kwargs):\r\n return localizer.translate(tsf(*args, **kwargs))\r\n def auto_pluralize(*args, **kwargs):\r\n kwargs.setdefault(\"domain\", \"faapp\")\r\n return localizer.pluralize(*args, **kwargs)\r\n request.localizer = localizer\r\n request.translate = auto_translate\r\n request.ungettext = auto_pluralize\r\n request.environ['fa.translate'] = auto_translate", "def setup_listener_gettext(self):\n\t\tth = current_thread()\n\n\t\t#print '>> listener gettext in ', th.name\n\n\t\ttry:\n\t\t\tth._ = th._licornd.langs[th.listener.lang].ugettext\n\n\t\texcept KeyError:\n\t\t\t# the daemon doesn't have the client lang installed. 
Not a problem.\n\t\t\t# Still, make a shortcut to the daemon's default translator to\n\t\t\t# avoid trigerring an exception at every call of our translator\n\t\t\t# wrapper.\n\t\t\tth._ = __builtin__.__dict__['_orig__']", "def _translate_freq_domain(self, freq_domain):\n\n lin_spaces = [np.linspace(-0.5, 0.5, x) for x in freq_domain.shape]\n meshgrids = np.meshgrid(*lin_spaces, indexing='ij')\n grid_coords = np.array([mg.flatten() for mg in meshgrids])\n\n phase_shift = np.multiply(grid_coords, self.translations).sum(axis=0) # phase shift is added\n exp_phase_shift = np.exp(-2j * math.pi * phase_shift)\n freq_domain_translated = np.multiply(exp_phase_shift, freq_domain.flatten(order='C')).reshape(freq_domain.shape)\n\n return freq_domain_translated", "def gettext_translate( s ):\n return catalogs.translate(s)", "def handler(catalogs, name):\n gsm = getSiteManager()\n # Try to get an existing domain and add the given catalogs to it\n domain = queryUtility(ITranslationDomain, name)\n if domain is None:\n domain = TranslationDomain(name)\n gsm.registerUtility(domain, ITranslationDomain, name=name)\n for catalog in catalogs:\n domain.addCatalog(catalog)\n # make sure we have a TEST catalog for each domain:\n domain.addCatalog(TestMessageCatalog(name))", "def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"", "def gettext_getfunc( lang ):\n # Note: you would get the gettext catalog here and install it in the\n # closure.\n\n def tr( s ):\n # Note: we do not really translate here, we just prepend the\n # language, but you get the idea.\n return '[%s] %s' % (lang, s)\n\n return tr", "def __init__(self, domain=None, language=None, translation_path=DEFAULT_TRANSLATION_PATH,\n update_on_missing=False, cache_time=datetime.timedelta(hours=1)):\n\n self.default_domain = domain\n self.default_language = language\n self.translation_path = translation_path\n self.update_on_missing = update_on_missing\n self.cache_time = cache_time\n\n self.language = self.init_language()\n self.domain_cache = {}\n\n if not os.path.exists(self.translation_path):\n os.mkdir(self.translation_path)\n\n\tif socket.getfqdn().endswith('.wmflabs'):\n\t self.download_url = Intuition.DOWNLOAD_URL_LABS\n\telse:\n\t self.download_url = Intuition.DOWNLOAD_URL_WWW", "def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _", "def fake_ugettext(translations):\n def _ugettext(text):\n return translations.get(text, text)\n return _ugettext", "def fake_ugettext(translations):\r\n def _ugettext(text): # pylint: disable=missing-docstring\r\n return translations.get(text, text)\r\n return _ugettext", "def installQPackage(self, name, domain, version, reconfigure=True):\n installPackageCommand = \"\"\"p = q.qp.find(name=\"%(name)s\", domain=\"%(domain)s\", version=\"%(version)s\")\nif not p:\n raise valueError(\"Package %(domain)s, %(name)s, %(version)s not found\")\nelif len(p) <> 
1:\n raise valueError(\"Too many packages found with search criteria %(domain)s, %(name)s, %(version)s\")\nelif not p[0].isInstalled():\n p[0].install()\nelse:\n print \"Package %(domain)s, %(name)s, %(version)s is already installed\"\n\"\"\"%{'name':name,'version':version,'domain':domain,'reconfigure':reconfigure}\n self.executeQshell(installPackageCommand)\n if reconfigure:\n self.executeQshell(\"q.qp._runPendingReconfigeFiles()\")", "def _do_install_hook(self, args):\r\n hook_name = args[1]\r\n fct_name = args[2]\r\n hooks.install_hook(hook_name, self._hooks_fct[fct_name])", "def add_lookup(namespace, directory, package=None, prepend=False):\r\n templates = LOOKUP.get(namespace)\r\n if not templates:\r\n LOOKUP[namespace] = templates = DynamicTemplateLookup(\r\n module_directory=settings.MAKO_MODULE_DIR,\r\n output_encoding='utf-8',\r\n input_encoding='utf-8',\r\n default_filters=['decode.utf8'],\r\n encoding_errors='replace',\r\n )\r\n if package:\r\n directory = pkg_resources.resource_filename(package, directory)\r\n templates.add_directory(directory, prepend=prepend)", "def prereposetup_hook(conduit):\n return init_hook(conduit)", "def add_localizer(event):\n def auto_translate(string):\n \"\"\" Use the message factory to translate strings.\"\"\"\n return localizer.translate(MessageFactory(string))\n\n def gettext_translate(string):\n \"\"\" Translate untranslated strings with FormEncode.\"\"\"\n # Try default translation first\n translation = localizer.old_translate(i18n.TranslationString(string))\n if translation == string:\n # translation failed then use FormEncode\n translation = formencode_api._stdtrans(string)\n return translation\n\n request = event.request\n localizer = i18n.get_localizer(request)\n request.localizer = localizer\n request.translate = auto_translate\n\n if not hasattr(localizer, \"old_translate\"):\n localizer.old_translate = localizer.translate\n locale_name = i18n.get_locale_name(request)\n formencode_api.set_stdtranslation(languages=[locale_name])\n localizer.translate = gettext_translate", "def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()", "def install_N_():\n\n import __builtin__\n\n __builtin__.N_ = lambda s: s", "def prepare_translations():\n output_fn = '/home/jelle/Desktop/django.csv'\n local('po2csv apps/dasa/locale/id/LC_MESSAGES/django.po %(output_fn)s' % locals())\n print 'output written to %(output_fn)s' % locals()", "def install(target_basedir):\n try:\n shutil.copy2(\n api.get_file_path(DNF_PLUGIN_NAME),\n os.path.join(target_basedir, DNF_PLUGIN_PATH.lstrip('/')))\n except EnvironmentError as e:\n api.current_logger().debug('Failed to install DNF plugin', exc_info=True)\n raise StopActorExecutionError(\n message='Failed to install DNF plugin. Error: {}'.format(str(e))\n )", "def _lazy_ugettext(text: str):\n try:\n # Test if context is available,\n # cf. 
https://github.com/tracim/tracim/issues/173\n context = StackedObjectProxy(name=\"context\")\n context.translator\n return ugettext(text)\n except TypeError:\n return text", "def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext", "def install_package_data(data_dir: str = None):\n\n zen = InstallPackageData(data_dir=data_dir)\n\n zen.fetch_zenodo()", "def test_install_with_local(self):\n parsed_targets = (\n OrderedDict(((\"gettext-runtime\", None), (\"p5-Mojolicious\", None))),\n \"repository\",\n )\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n patches = {\n \"cmd.run_all\": pkg_cmd,\n \"pkg_resource.parse_targets\": MagicMock(return_value=parsed_targets),\n }\n with patch.dict(pkgng.__salt__, patches):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n added = pkgng.install(local=True)\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(added, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"install\", \"-yU\", \"gettext-runtime\", \"p5-Mojolicious\"],\n output_loglevel=\"trace\",\n python_shell=False,\n env={},\n )", "def test_localedir(self):\n self.chck_triple('localedir')" ]
[ "0.6045004", "0.5466799", "0.526115", "0.51772755", "0.5106761", "0.49439147", "0.49150738", "0.4845263", "0.48276627", "0.48242715", "0.4787425", "0.47347873", "0.47273436", "0.46018976", "0.45374277", "0.44985", "0.4474104", "0.44217488", "0.44217035", "0.4414562", "0.4391696", "0.4356008", "0.43494844", "0.43481743", "0.4340668", "0.4339739", "0.4332081", "0.43302852", "0.43228802", "0.4319261" ]
0.76936036
0
Create and return a Message object. Lazy gettext function for a given domain, it is a factory method for a project/module to get a lazy gettext function for its own translation domain (i.e. nova, glance, cinder, etc.). Message encapsulates a string so that we can translate it later when needed.
def _lazy_gettext(msg): return Message(msg, domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createMessage( self, *args, **kw ):\n return MailMessage( *args, **kw )", "def install(domain, lazy=False):\r\n if lazy:\r\n # NOTE(mrodden): Lazy gettext functionality.\r\n #\r\n # The following introduces a deferred way to do translations on\r\n # messages in OpenStack. We override the standard _() function\r\n # and % (format string) operation to build Message objects that can\r\n # later be translated when we have more information.\r\n #\r\n # Also included below is an example LocaleHandler that translates\r\n # Messages to an associated locale, effectively allowing many logs,\r\n # each with their own locale.\r\n\r\n def _lazy_gettext(msg):\r\n \"\"\"Create and return a Message object.\r\n\r\n Lazy gettext function for a given domain, it is a factory method\r\n for a project/module to get a lazy gettext function for its own\r\n translation domain (i.e. nova, glance, cinder, etc.)\r\n\r\n Message encapsulates a string so that we can translate\r\n it later when needed.\r\n \"\"\"\r\n return Message(msg, domain)\r\n\r\n import __builtin__\r\n __builtin__.__dict__['_'] = _lazy_gettext\r\n else:\r\n localedir = '%s_LOCALEDIR' % domain.upper()\r\n gettext.install(domain,\r\n localedir=os.environ.get(localedir),\r\n unicode=True)", "def gettext(self, message):\n if self._fallback:\n return self._fallback.gettext(message)\n return message", "def localize(self, msg):\n return self.translations.get(msg, msg)", "def localize(self, msg):\n return self.translations.get(msg, msg)", "def get(self, key, domain=None, language=None, context=None):\n\n if domain is None:\n if self.default_domain is None:\n raise ValueError('No domain given!')\n domain = self.default_domain\n messages = self.get_domain(domain, language)\n\n if not key in messages and self.update_on_missing:\n messages = self.get_domain(domain, language, force_download=True)\n\n if not key in messages:\n raise ValueError('No message for the key {0}!'.format(key))\n\n message = messages[key]\n \n if context is not None:\n for i in range(0, len(context)):\n placeholder = \"${0}\".format(i + 1)\n message = message.replace(placeholder, unicode(context[i]))\n \n return message", "def lgettext(self, message):\n\n return self.get_pseudo(message)", "def gettext(self, message):\n if self._translations.has_key(message):\n return self._translations[message]\n return super(Translations, self).gettext(message)", "def convert_msg(self, msg):\r\n source = msg.msgid\r\n if not source:\r\n # don't translate empty string\r\n return\r\n\r\n plural = msg.msgid_plural\r\n if plural:\r\n # translate singular and plural\r\n foreign_single = self.convert(source)\r\n foreign_plural = self.convert(plural)\r\n plural = {\r\n '0': self.final_newline(source, foreign_single),\r\n '1': self.final_newline(plural, foreign_plural),\r\n }\r\n msg.msgstr_plural = plural\r\n else:\r\n foreign = self.convert(source)\r\n msg.msgstr = self.final_newline(source, foreign)", "def createMessage( self, *args, **kw ):\n if not kw.has_key('charset'):\n kw['charset'] = self.getInputCharset()\n return MailServerBase.createMessage( self, *args, **kw )", "def get_localized_message(message, user_locale):\r\n if isinstance(message, Message):\r\n if user_locale:\r\n message.locale = user_locale\r\n return unicode(message)\r\n else:\r\n return message", "def interpolate_insted_of_translate(\n self, msgid, mapping=None, *args, **kw): # pragma: no cover webdriver\n return zope.i18n.interpolate(msgid, mapping)", "def ugettext(self, message):\n if self._fallback:\n return 
self._fallback.ugettext(message)\n return unicode(message)", "def gettext(self, string):\n return self._messages.get(string, 'No error message defined')", "def _lazy_ugettext(text: str):\n try:\n # Test if context is available,\n # cf. https://github.com/tracim/tracim/issues/173\n context = StackedObjectProxy(name=\"context\")\n context.translator\n return ugettext(text)\n except TypeError:\n return text", "def pgettext(msgctxt, message):\r\n key = msgctxt + '\\x04' + message\r\n translation = get_translation().gettext(key)\r\n return message if translation == key else translation", "def gettext_for(locale='en'):\n return Translations.load(\n os.path.join(BASEDIR, 'app', 'translations'), [locale]\n ).ugettext", "def auto_translate(string):\n return localizer.translate(MessageFactory(string))", "def gettext_translate( s ):\n return catalogs.translate(s)", "def get_message(self, **kwargs):\n message = Mail()\n if \"from_email\" in kwargs:\n sender = Email()\n message_content = kwargs.get(\"message_content\", \"\")\n sender.name = message_content.get(\"sender\", emailconf.DEFAULT_SENDER)\n sender.email = kwargs.get(\"from_email\", emailconf.DEFAULT_SENDER_EMAIL)\n message.from_email = sender\n if \"subject\" in kwargs:\n message.subject = kwargs.get(\"subject\", \"\")\n if \"text\" in kwargs:\n content = Content(\"text/plain\", kwargs.get(\"text\", \"\"))\n message.add_content(content)\n if \"html\" in kwargs:\n content = Content(\"text/html\", kwargs.get(\"html\", \"\"))\n message.add_content(content)\n if \"category\" in kwargs:\n category = Category(kwargs.get(\"category\", \"\"))\n message.add_category(category)\n\n personalization = self.create_personalization(**kwargs)\n if personalization:\n message.add_personalization(personalization)\n\n return message.get()", "def gettext(self, string):\n return self._translations.gettext(string)", "def gettext_getfunc( lang ):\n # Note: you would get the gettext catalog here and install it in the\n # closure.\n\n def tr( s ):\n # Note: we do not really translate here, we just prepend the\n # language, but you get the idea.\n return '[%s] %s' % (lang, s)\n\n return tr", "def ugettext(self, message):\n if isinstance(message, unicode):\n msg = message.encode(\"utf-8\")\n else:\n msg = message\n if self._translations.has_key(msg):\n return unicode(self._translations[msg], \"utf-8\")\n return super(Translations, self).ugettext(message)", "def createMessage( self, *args, **kw ):\n if not kw.has_key('charset'):\n kw['charset'] = self.getOutputCharset()\n kw['to_mail'] = 1\n return MailServerBase.createMessage( self, *args, **kw )", "def __create_message(sender, recipients, subject, message_text):\n message = {\n \"to\": recipients,\n \"from_email\": sender,\n \"subject\": subject,\n \"html\": message_text,\n }\n\n return message", "def contact_get_message_string(user_name, contact_name, contact_email,\n contact_message):\n if user_name:\n message = (\n \"Message from FreeFrom \\nName: \" +\n contact_name +\n \"\\nUser Name: \" + user_name +\n \"\\nEmail Address: \" + contact_email +\n \"\\nMessage: \" + contact_message)\n else:\n message = (\n \"Message from FreeFrom \\nName: \" +\n contact_name +\n \"\\nEmail Address: \" + contact_email +\n \"\\nMessage: \" + contact_message)\n return message", "def t(message):\n\n tpl = string.Template(message)\n return tpl.substitute(country=settings.COUNTRY_NAME, language=settings.LANGUAGE_NAME)", "def get_gettext():\n local_path = os.path.realpath(os.path.dirname(sys.argv[0])) + \\\n '/translations'\n langs = []\n 
lc, encoding = locale.getdefaultlocale()\n if (lc):\n langs = [lc]\n osLanguage = os.environ.get('LANGUAGE', None)\n if (osLanguage):\n langs += osLanguage.split(\":\")\n langs += [\"en_US\"]\n lang = gettext.translation('wicd', local_path, languages=langs, \n fallback=True)\n _ = lang.gettext\n return _", "def tr(self, message):\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('MCDM', message)", "def Message(self, *args, **kwargs):\n return Message(self, *args, **kwargs)" ]
[ "0.6170995", "0.6077606", "0.6022286", "0.60164195", "0.60164195", "0.5980296", "0.595576", "0.5942685", "0.58777326", "0.5782563", "0.57686394", "0.5751088", "0.5735052", "0.5731552", "0.57014626", "0.5689661", "0.5685707", "0.566268", "0.5620519", "0.559923", "0.55693084", "0.5526863", "0.5513047", "0.5505829", "0.5498838", "0.5485096", "0.5479767", "0.5468923", "0.545881", "0.5325293" ]
0.81580895
0
Lists the available languages for the given translation domain.
def get_available_languages(domain):
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and all projects udpate
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def langs(self, context):\n languages = get_langs(context.message.guild)\n await context.channel.send(LANG_LIST.format(nb_lang=len(languages), langs=enum(languages)))", "def languages():\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/getLangs')\n return r.json['dirs']", "async def public_get_languages_async(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicGetLanguages.create(\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)", "def public_get_languages(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicGetLanguages.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def get_langs():\r\n temp = \"\"\r\n translate_client = translate.Client()\r\n for i in translate_client.get_languages():\r\n temp += i['name'] + \": \" + i['language'] + \"\\n\"\r\n\r\n return temp", "def get_all_languages():\n\tdef _get():\n\t\tif not frappe.db:\n\t\t\tfrappe.connect()\n\t\treturn frappe.db.sql_list('select name from tabLanguage')\n\treturn frappe.cache().get_value('languages', _get)", "def languages(self):\n\n return self._request('/languages')", "def languages(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'languages')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "async def get_languages_async(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetLanguages.create(\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def wikiLanguages():\n return languages", "def get_languages(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetLanguages.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def available_languages(self):\n data = self._run(\n url_path=\"languages/available\"\n )\n return data['result'].get('languages', [])", "def get_all_languages(with_language_name: bool = False) -> list:\n\n\tdef get_language_codes():\n\t\treturn frappe.get_all(\"Language\", filters={\"enabled\": 1}, pluck=\"name\")\n\n\tdef get_all_language_with_name():\n\t\treturn frappe.get_all(\"Language\", [\"language_code\", \"language_name\"], {\"enabled\": 1})\n\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\tif with_language_name:\n\t\treturn 
frappe.cache.get_value(\"languages_with_name\", get_all_language_with_name)\n\telse:\n\t\treturn frappe.cache.get_value(\"languages\", get_language_codes)", "def languages_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"languages\", access_token)", "def list_project_languages(self, project_id):\n data = self._run(\n url_path=\"languages/list\",\n id=project_id\n )\n return data['result'].get('languages', [])", "def list(self):\n for key, value in self.languages.iteritems():\n print key, value", "def book_language_list(request):\n languages = Language.objects.all().order_by('-name')\n return render(request, 'library/book_language_list.html', {\"languages\": languages, })", "def ListConceptLanguages(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list_domain_names():\n pass", "def languages_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"languages\", access_token)", "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def get_territory_locales(territory):\n\n return langtable.list_locales(territoryId=territory)", "def languages(request, region_slug):\n try:\n region = Region.objects.get(slug=region_slug)\n\n result = list(\n map(\n lambda l: {\n \"id\": l.language.id,\n \"code\": l.language.code,\n \"native_name\": l.language.name,\n \"dir\": l.language.text_direction,\n },\n region.language_tree_nodes.filter(active=True),\n )\n )\n return JsonResponse(\n result, safe=False\n ) # Turn off Safe-Mode to allow serializing arrays\n except ObjectDoesNotExist:\n return HttpResponse(\n f'No Region found with name \"{region_slug}\".',\n content_type=\"text/plain\",\n status=404,\n )", "def get_languages(self):\n language_list = []\n url = '%s%s/languages.xml' % (self.URL_API, self.API_KEY)\n data = urllib.urlopen(url)\n root = cElementTree.parse(data).getroot()\n for language in root.iter('Language'):\n language_list.append(language.find('abbreviation').text)\n return language_list", "def list_languages(self):\n known = [ob.capitalize() for ob in self.caller.languages.known_languages]\n known += [\"Arvani\"]\n self.msg(\"{wYou can currently speak:{n %s\" % \", \".join(known))\n self.msg(\n \"You can learn %s additional languages.\"\n % self.caller.languages.additional_languages\n )", "def langs(cls):\n codes = cls.codes[:]\n\n if hasattr(cls, 'test_codes'):\n codes += cls.test_codes\n\n codes += cls.closed_wikis\n\n # shortcut this classproperty\n cls.langs = {code: f'{code}.{cls.domain}' for code in codes}\n cls.langs.update({alias: f'{code}.{cls.domain}'\n for alias, code in cls.code_aliases.items()})\n\n return cls.langs", "def get_available_translations(localedir=None):\n\n localedir = localedir or gettext._default_localedir\n\n # usually there are no message files for en\n messagefiles = sorted(glob.glob(localedir + \"/*/LC_MESSAGES/anaconda.mo\") +\n [\"blob/en/blob/blob\"])\n trans_gen = (path.split(os.path.sep)[-3] for path in messagefiles)\n\n langs = set()\n\n for trans in trans_gen:\n parts = parse_langcode(trans)\n lang = parts.get(\"language\", \"\")\n if lang and lang not in langs:\n langs.add(lang)\n # check if there are any locales for the language\n locales = get_language_locales(lang)\n if 
not locales:\n continue\n\n yield lang", "def languages_display(self):\n is_draft = self.extended_object.publisher_is_draft\n node = self.extended_object.node\n current_and_descendant_nodes = node.__class__.get_tree(parent=node)\n\n course_runs = (\n CourseRun.objects.filter(\n direct_course__extended_object__node__in=current_and_descendant_nodes,\n direct_course__extended_object__publisher_is_draft=is_draft,\n )\n .exclude(catalog_visibility=\"hidden\")\n .only(\"languages\")\n )\n languages = list(\n {x for course_languages in course_runs for x in course_languages.languages}\n )\n instance = CourseRun(languages=languages)\n return instance.get_languages_display()", "def langs(cls):\n codes = cls.codes\n\n if hasattr(cls, 'code_aliases'):\n codes += tuple(cls.code_aliases.keys())\n\n return {code: cls.domain for code in codes}" ]
[ "0.6588208", "0.64708924", "0.6436626", "0.6404924", "0.63879085", "0.6371736", "0.6343199", "0.6338791", "0.6254754", "0.61821175", "0.6165501", "0.6159573", "0.6158885", "0.6088327", "0.6040539", "0.602944", "0.5906514", "0.58874905", "0.58680695", "0.5818944", "0.5817654", "0.58130574", "0.5794492", "0.57773316", "0.57722414", "0.5766591", "0.5765868", "0.5755404", "0.5716086", "0.5692664" ]
0.75461453
0