query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4-10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Simple checking function whether an url exists or not for an API for which we expect | def url_was_found(url="localhost:5000/health"):
res = requests.get(url).json()
if res['status_code'] == 200:
return True
elif res['status_code'] == 404:
return False
else:
raise UnexpectedResponseError("Expected 200 OK or 404, got {}.\n".format(res['status']), "Full response : {}".format(res)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False",
"def check_url(url):\n return 'products.json' in url",
"def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False",
"def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False",
"def _url_exists(self, url):\n return url_exists(url)",
"def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']",
"def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok",
"def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)",
"def test_format_price_api_url_exists(self):\n self.assertIsNotNone(format_price_api_url)",
"def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)",
"def _verify_url_exists(url, use_head=False):\n # (str, bool) -> bool\n try:\n if use_head:\n resp = requests.head(url)\n else:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n return False\n\n return resp.status_code in [200, 302]",
"async def has_url(self, url: StrOrURL) -> bool:\n key = self.create_key('GET', url)\n return await self.responses.contains(str(key)) or await self.redirects.contains(str(key))",
"def validate_url(url: str) -> None:\n if not is_valid_url(url):\n raise ValueError(f\"Validation Error. Provided url '{url}' is not valid.\")\n try:\n response = requests.get(url)\n except Exception as e:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")\n else:\n if response.status_code != status.HTTP_200_OK:\n raise ValueError(f\"Validation Error. '{url}' website doesn't exists.\")",
"def test_api_url_no_query(self):\n url = 'http://api.shopstyle.com/action/apiVisitRetailer'\n assert extract_product_id_from_api_url(url) is None",
"def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404",
"def is_valid_url(url: str) -> bool:\n try:\n requests.get(url)\n except requests.exceptions.RequestException:\n return False\n return True",
"def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False",
"def is_valid_api(self, url):\n output = self.api.download_is_valid_api_url(url, non_blocking=False)\n error = ''\n if not output:\n error = 'Invalid Anaconda API url.'\n return output, error",
"def check_url(url):\n return get_svninfo(url) != {}",
"def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:",
"def test_api_lookup(self):\n\n # Set up the url for the api call\n\n expected_url = 'https://www.gov.uk/api/content{}'.format(self.urlsclass.dedupurls[0])\n\n # Make request and extract json.\n\n expected = requests.get(expected_url).json()\n\n assert api_lookup(self.urlsclass.dedupurls[0], 'https://www.gov.uk/api/content') == expected",
"def exists(self, url):\n return (self.base_path / url).exists()",
"def url_check_tester(client, url, status_code):\n response = client.get(url)\n assert response.status_code == status_code, \\\n f'Unexpected status code for {url}'\n assert response.data == b''",
"def url_check(url):\n try:\n request = urllib.request.Request(url)\n request.get_method = lambda: 'HEAD'\n urllib.request.urlopen(request)\n return True\n \n except ValueError:\n return False\n\n except urllib.request.HTTPError:\n return False\n \n except URLError:\n return False",
"def is_ok(url: str) -> bool:\n try:\n resp = requests.get(url)\n except:\n return False\n return True if math.floor(resp.status_code / 100) == 2 else False",
"def test_api_url_no_id(self):\n url = 'http://api.shopstyle.com/action/apiVisitRetailer?pid=uid3600-33034440-48'\n assert extract_product_id_from_api_url(url) is None",
"def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1",
"def test_api_url_similar(self):\n url = 'http://api.shopstyle.co.uk/action/apiVisitRetailer?id=471281504&pid=uid3600-33034440-48'\n assert extract_product_id_from_api_url(url) is None",
"def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! ')\n return res.status_code"
]
| [
"0.75923854",
"0.7515353",
"0.7493454",
"0.7483758",
"0.7450148",
"0.742065",
"0.7366325",
"0.73550236",
"0.73360634",
"0.7320072",
"0.73066574",
"0.72183454",
"0.71041965",
"0.70459384",
"0.70447576",
"0.70319855",
"0.7022121",
"0.6952489",
"0.6927507",
"0.6901926",
"0.6881055",
"0.68792075",
"0.6863043",
"0.6862138",
"0.6843328",
"0.68259096",
"0.68086654",
"0.6797582",
"0.6789448",
"0.6739142"
]
| 0.75237095 | 1 |
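The `url_was_found` document above checks a `status_code` key inside the JSON body (and its error message reads `res['status']` while the checks read `res['status_code']`), which only works if the health endpoint echoes its status in the payload. A minimal sketch of the more common pattern, inspecting the HTTP response code directly, is shown below; the default URL and the `UnexpectedResponseError` class are placeholders, not part of any established API.

```python
import requests


class UnexpectedResponseError(Exception):
    """Placeholder for the custom exception referenced in the snippet above."""


def url_was_found(url="http://localhost:5000/health"):
    # Inspect the transport-level status code rather than a field in the JSON body.
    res = requests.get(url)
    if res.status_code == 200:
        return True
    if res.status_code == 404:
        return False
    raise UnexpectedResponseError(
        "Expected 200 OK or 404, got {}.\nFull response: {}".format(res.status_code, res.text)
    )
```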
Function to extract a list of genes and write to file | def get_genes(infile,outfile):
gene_list = []
with open(infile) as gene:
tag = False
for line in gene:
if line.startswith('name'):
tag = True
continue
if tag:
items = line.split()
if len(items) > 0:
gene_list.append(items[0])
gene_list = gene_list[1:-7]
with open(outfile, 'w') as outfile:
for i in gene_list:
outfile.write(i+'\n')
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_genes(filename, fmt, outname, outfmt, locuses=None):\n\n record = SeqIO.read(filename, fmt)\n proteins = []\n for feature in filter(lambda f: f.type == \"CDS\", record.features):\n qualifiers = feature.qualifiers\n if locuses is None or qualifiers[\"locus_tag\"][0] in locuses:\n protein = SeqRecord(Seq(qualifiers[\"translation\"][0], IUPAC.protein), \n id=\"{0}|{1}\".format(record.id, qualifiers[\"locus_tag\"][0]),\n name=qualifiers[\"gene\"][0],\n dbxrefs=qualifiers[\"db_xref\"],\n description=qualifiers[\"product\"][0])\n\n proteins.append(protein)\n \n SeqIO.write(proteins, outname, outfmt)",
"def saveTmdbGenres():\n \n listGenres = tmdb.Genres().list()[\"genres\"]\n \n genres = { _format(g[\"name\"]):i for i, g in enumerate(listGenres) }\n\n np.save(GENRES_FILE, np.asarray([genres]))",
"def genes():\n return [\"b2935\", \"b0723\", \"b0451\"]",
"def geneA(nombreA,listaPGA): #Esta sección fue hecha por Ángel\n with open(nombreA + \".txt\", \"w\") as archivo:\n archivo.writelines(listaPGA)",
"def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()",
"def readGenes(gtf):\n #read gtf\n genes = HTSeq.GenomicArrayOfSets(\"auto\", stranded=False)\n gs = {}\n for line in open(gtf):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != 'exon':\n continue\n ds = parseGtfFeature(line[8])\n key = \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]])\n nline = [\n line[0], line[3], line[4],\n \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]]), \".\", line[6]\n ]\n if key not in gs:\n gs[key] = [line[0], int(line[3]), int(line[4])]\n else:\n if int(line[3]) < gs[key][1]:\n gs[key][1] = int(line[3])\n if int(line[4]) > gs[key][2]:\n gs[key][2] = int(line[4])\n for g, v in gs.items():\n iv = HTSeq.GenomicInterval(v[0], v[1], v[2])\n genes[iv] += g\n return genes",
"def get_genes_and_gpr(model,gene_outfile,gpr_outfile):\n model_dict = model_to_dict(model, sort=False)\n genes = pd.DataFrame(model_dict['genes']).set_index(['id'])\n genes.to_csv(gene_outfile)\n all_gpr = pd.DataFrame(model_dict['reactions']).set_index(['id'])\n all_gpr.to_csv(gpr_outfile)\n return [genes, all_gpr]",
"def write_genes_bed(db, bed_path, verbose=True):\n genes = db.features_of_type('gene')\n\n i = 0\n\n with open(bed_path, 'w') as out:\n for g in genes:\n i += 1\n if verbose:\n if i == 1:\n print(\"Writing records:\\n1 ..\")\n elif i%2500 == 0:\n print(i)\n out.write(db.bed12(g.id, name_field='gene_id')+'\\n')",
"def output_refgenes_rpkm(refgene_file, name4save, rna_seq_files):\n file2save = open(name4save + '.txt', 'w')\n header = '\\t'.join([''] + rna_seq_files) + '\\n'\n file2save.write(header)\n result = []\n for rsf in rna_seq_files:\n ref_tool = RefSeqTools()\n ref_tool.read_refgene_file(refgene_file)\n ref_tool.map_reads_2genes(rsf)\n result.append(ref_tool)\n ref_genes = result[0].ref_genes\n for i, rg in enumerate(ref_genes):\n line = rg.name2 + '\\t'\n line += '\\t'.join([str(gene.read_density) for gene in [tool.ref_genes[i] for tool in result]])\n line += '\\n'\n file2save.write(line)\n file2save.close()",
"def to_files(self, gen, filenames=None):\n\n if filenames:\n self.filenames = filenames\n\n for f, arr in zip(self.pathgen, gen):\n np.save(f, arr)",
"def genes_file_creation(input_folder):\n file_paths = {}\n for file_name in os.listdir(input_folder):\n file_paths[file_name] = input_folder + '/' + file_name\n\n df = pa.DataFrame()\n \n for file_name in file_paths:\n df_temp = pa.read_csv(file_paths[file_name], sep='\\t', header=None)\n print(df_temp.columns)\n gene_column = 0\n df_temp = df_temp[[gene_column]]\n df_temp.columns = ['Gene_Name_DE']\n row = []\n file_extension = os.path.splitext(file_name)[1]\n row.append(file_name.replace(file_extension, \"\"))\n row.extend(df_temp['Gene_Name_DE'].tolist())\n df = df.append([row], ignore_index=True)\n\n df.insert(1, 'Description', 'Genes_DE')\n\n df.to_csv('DE_gene.gmt', sep='\\t', index=False, header=False)",
"def save_geneset_to_file(geneset, output_file):\n with gzip.open(output_file, \"w\") as handle:\n handle.write(\"\\n\".join(geneset).encode(\"utf-8\"))",
"def save_per_gene(filename, tn_per_gene, reads_per_gene, aliases_designation):\n\n with open(filename, \"w\") as f:\n\n f.write(\"Gene name\\tNumber of transposons per gene\\tNumber of reads per gene\\n\")\n\n for gene in tn_per_gene:\n tnpergene = tn_per_gene[gene]\n readpergene = reads_per_gene[gene]\n if gene in aliases_designation:\n gene_alias = aliases_designation.get(gene)[0]\n else:\n gene_alias = gene\n f.write(gene_alias + \"\\t\" + str(tnpergene) + \"\\t\" + str(readpergene) + \"\\n\")",
"def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)",
"def _get_genes(self, genes: Union[str, List[str]]) -> List[str]:\n if isinstance(genes, str):\n up = pd.read_table(genes, header=None, comment=\"#\", dtype=str)\n ups= up.values.astype(str)\n ups = list(np.squeeze(ups))\n elif isinstance(genes, (list, tuple)):\n ups = genes\n else:\n raise Exception(\"genes must be filepath, list or tuple\")\n # filter genes\n ups_new = [str(i) for i in ups if str(i) in self.genes]\n\n if len(ups_new) < 1: \n raise Exception(\"No genes found. Please input proper Entrez id\")\n return ups_new",
"def genes(context):\n LOG.info(\"Running scout export genes\")\n adapter = context.obj['adapter']\n \n header = [\"#Chrom\\tStart\\tEnd\\tHgncSymbol\\tHgncID\"]\n\n for line in header:\n click.echo(line)\n\n for gene in export_genes(adapter):\n click.echo(gene)",
"def gos_files_creation(annotation_file, go_namespace_studied):\n go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo')\n\n # For each GO terms look to the namespaces associated with them.\n go_namespaces = {}\n for go_term in go_ontology:\n go_namespaces[go_term.id] = go_term.other['namespace'][0]\n\n # For each GO terms look if there is an alternative ID fo them.\n go_alt_ids = {}\n for go_term in go_ontology:\n if 'alt_id' in go_term.other:\n for go_alt in go_term.other['alt_id']:\n go_alt_ids[go_alt] = go_term.id\n\n # Genome file with genes associated with GO terms.\n df = pa.read_csv(annotation_file, sep='\\t', header=None)\n df.columns = ['Gene_Name', 'GOs']\n df.replace(np.nan, '', inplace=True)\n\n gos_in_df = []\n for gos in df['GOs']:\n for go in gos.split(','):\n if go not in gos_in_df:\n gos_in_df.append(go)\n\n df.set_index('Gene_Name', inplace=True)\n\n gene_gos = []\n for gene, row in df.iterrows():\n for go in row['GOs'].split(','):\n gene_gos.append((go, gene))\n\n dic_go_genes = {}\n for go in tqdm(gos_in_df):\n genes = []\n for gene_go in gene_gos:\n if go != '' and go not in go_namespaces:\n go = go_alt_ids[go]\n if gene_go[0] == go and go != '' and go_namespaces[go] == go_namespace_studied:\n genes.append(gene_go[1])\n if go != '':\n dic_go_genes[go] = genes\n\n print(len(dic_go_genes))\n\n delete_keys = []\n for go in dic_go_genes:\n if len(dic_go_genes[go]) < 4:\n delete_keys.append(go)\n\n for key in delete_keys:\n del dic_go_genes[key]\n print(len(dic_go_genes))\n\n df_go = pa.DataFrame.from_dict(dic_go_genes, orient='index')\n df_go.insert(0, 'Description', 'GO_terms')\n\n df_go.to_csv('go_gene.gmt', sep='\\t', header=False)\n\n df.reset_index(inplace=True)\n df_query_go = pa.concat([pa.Series(row['Gene_Name'], row['GOs'].split(','))\n for _, row in df.iterrows()]).reset_index()\n df_query_go.columns = ['GOs', 'Gene_Name']\n df_query_go = df_query_go[['Gene_Name', 'GOs']]\n df_query_go.to_csv('query_go.tsv', sep='\\t', index=False)",
"def extract_genes(seq_record):\n return [f for f in seq_record.features if f.type == \"gene\"]",
"def stampaGTFEsIn(dictTranscript, dictGenes, dictInput, fileOut, geneNames):\n\n\tstringaGTF \t\t\t\t= \t\t'%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n'\t\t\t\t\t# Formato della riga da stampare nel file\n\texonF\t\t\t\t\t= \t\t'exon_number \"%d\"'\t\t\t\t\t\t\t# Formato della stringa di tipo exon (True)\n\tintronF\t\t\t\t\t=\t\t'intron_number \"%d\"'\t\t\t\t\t\t# Formato della stringa di tipo intron (False)\n\t\n\t# Indici all'interno del dizionario dei transcript\n\t#\n\tidx_transcriptName = 0\n\tidx_geneID = 1\n\t\n\t# Indici all'interno del dizionari dei geni\n\t#\n\tidx_geneName = 0\n\tidx_cromosoma = 1\n\n\t# Indici all'interno del dizionario degli introni e degli esoni\n\t#\n\tidx_start = 0\n\tidx_end = 1\n\tidx_tipo = 2\t\n\n\t# Tipo di regioni\n\tesone = True\n\tintrone = False\n\n\n\t# Apertura e preparazione dei file da scrivere (un file gtf con\n\t# esoni/introni per ogni gene e uno totale con tutte le regioni per tutti\n\t# i geni passati dall'utente\n\t#\t\n\tfiles = {}\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\n\tfor gene in geneNames:\t\t\t\t\t\t\t\t\t\t\t\t \n\t\tcod = geneNames[gene]\n\t\t# Avendo tanti geni, ad ogni nome di gene si associa la relativa\n\t\t# cartella del gene corrente tra quelli passati dall'utente\n\t\t#\n\t\tif not path.exists(cartella % cod):\n\t\t\tsystem('mkdir ' + cartella % cod)\n\t\tfiles[gene] = open(str(cartella % cod + fileOut), 'w')\n\t\t\n\t# File contenente le regioni esoniche/introniche di tutti i geni\n\t# passati dall'utente (serve per mappare le reads)\n\t#\n\tfileGtf = open(str(fileOut), 'w')\t\t\t\t\t\t\t \n\n\tfor transcriptID in dictInput:\n\t\tgeneID \t\t\t= dictTranscript[transcriptID][idx_geneID]\n\t\tcromosoma\t\t= dictGenes[geneID][idx_cromosoma]\n\t\tgeneName\t\t= dictGenes[geneID][idx_geneName]\n\t\ttranscriptName \t= dictTranscript[transcriptID][idx_transcriptName]\n\t\t# Inizializzazione del numero di esone/introne da stampare nel file\n\t\t#\n\t\tnrEs \t\t\t= 1\n\t\tnrIn \t\t\t= 1\n\t\t\n\t\tfor i in range(0, len(dictInput[transcriptID][idx_start])):\n\t\t\tstart\t\t= dictInput[transcriptID][idx_start][i]\n\t\t\tend\t\t\t= dictInput[transcriptID][idx_end][i]\n\t\t\ttipo\t\t= dictInput[transcriptID][idx_tipo][i]\n\n\t\t\tif tipo == esone:\n\t\t\t\tregione = exonF % (nrEs)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato exon\n\t\t\t\tnrEs += 1\n\t\t\telse:\n\t\t\t\tregione = intronF % (nrIn)\t\t\t\t\t\t\t\t\t\t# Stampa della stringa in formato intron\n\t\t\t\tnrIn += 1\n\t\t\t\t\n\t\t\tstrGtf = stringaGTF % (cromosoma, str(start), str(end), regione,\t\t\n\t\t\t\t\t\t\t\t geneName, transcriptName)\t\t\t\t\t# Creazione della riga del file\n\t\t\t\n\t\t\tif geneName in geneNames:\t\t\t\t\t\t\t\t\t\t\t# Se il gene presenta regioni introniche..\n\t\t\t\tfiles[geneName].write(strGtf)\t\t\t\t\t\t\t\t\t# ..si stampa il file gtf relativo alle proprie..\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..regioni introniche nella propria cartella\n\n\t\t\tfileGtf.write(strGtf)\n\t\t\t\t\n\tif geneNames:\n\t\tfor gene in files:\n\t\t\tfiles[gene].close()\n\n\tfileGtf.close()",
"def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())",
"def generate_genes(genbank):\n for (idx, feature) in enumerate(genbank.features):\n if feature.type == 'source' or feature.type == 'gene':\n continue\n row = {\n 'location_start': feature.location.start,\n 'location_end': feature.location.end,\n 'strand': feature.strand,\n 'ref': feature.ref,\n 'ref_db': feature.ref_db\n }\n for (name, val) in feature.qualifiers.items():\n # For some reason, all values under .qualifiers are lists of one elem\n # We join the elems into a string just in case there are ever multiple items\n row[name] = ', '.join(val)\n if not row.get('locus_tag'):\n # No locus tag; skip this one. We can only use features with locus tags.\n continue\n row['_key'] = row['locus_tag']\n # Generate the DNA sequence using biopython\n # https://biopython.org/DIST/docs/api/Bio.SeqFeature.SeqFeature-class.html#extract\n seq_obj = SeqFeature(feature.location, feature.type) # type: SeqFeature\n seq_str = str(seq_obj.extract(genbank.seq))\n row['dna_sequence'] = seq_str\n yield row",
"def get_variants(cls, gen, folder):\n filename = 'temp_output{}.txt'.format(gen)\n\n with open(os.path.join(folder, filename), encoding='utf_8_sig', mode='r') as f:\n lines = f.readlines()\n\n for line in lines:\n if line.startswith('Phonemes'):\n line = line.strip()\n phonemes = line.split(':')[-1].split(',')\n if line.startswith('Allophones'):\n allophones = dict()\n line = line.strip()\n line = line.split(':')[-1]\n if not line:\n pass #no variation this turn\n else:\n line = line.split(',')\n for pair in line:\n ur,sr = pair.split('~')\n allophones[sr] = ur\n\n return phonemes,allophones",
"def dump_probeset2gene(db):\n\n f = \"_\".join((\"probeset2gene\",db[\"database\"], db[\"host\"], db[\"port\"],\".txt\"))\n if not os.path.exists(f):\n cmd = \"\"\"mysql -h %s -P%s -u ensadmin -pensembl \\\n -e \"select dbprimary_acc, stable_id from xref x, object_xref ox, transcript t, gene_stable_id gsi \\\n where %s and x.xref_id=ox.xref_id and t.transcript_id=ensembl_id \\\n and ox.ensembl_object_type='Transcript' \\\n and gsi.gene_id=t.gene_id group by stable_id, dbprimary_acc \" %s > %s\"\"\" % (db[\"host\"],\n db[\"port\"],\n\tAFFY_XREF_FILTER_CLAUSE,\n db[\"database\"],\n f)\n\n exec_command(cmd)\n return f",
"def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))",
"def GetGene(ids, base_url=BASE_URL, fout=None):\n n_out=0; tags=None;\n for id_this in ids:\n gene = rest.Utils.GetURL(base_url+'/gene/{0}'.format(id_this), parse_json=True)\n logging.debug(json.dumps(gene, indent=2))\n if not tags:\n tags = list(gene.keys())\n fout.write(\"\\t\".join(tags)+\"\\n\")\n vals = [(str(gene[tag]) if tag in gene else \"\") for tag in tags]\n fout.write(\"\\t\".join(vals)+\"\\n\")\n n_out+=1\n logging.info(\"n_out: %d\"%(n_out))",
"def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes",
"def format_target_genes(d, g, miranda_fnm, id_dict):\n\tprint 'format target genes'\n\toutput_nm = \"%s_netwerk.txt\"%(miranda_fnm[:-4])\n\toutput = open(output_nm, 'w')\n\t#output.write(\"sRNA\\ttotal_target_genes\\ttarget_genes_down\\ttarget_genes\\n\")\n\tfor key in d:\n\t\tif len(d[key]) > 0:\n\t\t\t#print key, len(d[key])\n\t\t\tname = \"sRNA_%s\"%(count)\n\t\t\t#output.write(\">>%s\\t%s\\t%s\\n\"%(key, len(d[key]),len(g[key])))\n\t\t\tfor gene in g[key]:\n\t\t\t\t#print gene\n\t\t\t\toutput.write(\"%s\\t%s\\t%s\\n\"%(id_dict[key], gene[0], key))\n\n\toutput.close()",
"def save_associated_genes(identifiers=[DEFAULT_IDENTIFIER]):\n for identifier in identifiers:\n file_path = os.path.join(EXTERNAL_DATA_PATH, \"{}.json\".format(identifier))\n if os.path.isfile(file_path):\n continue\n associated_genes = get_associated_genes(identifier)\n content = {\"identifier\": get_string_db_identifier(identifier), \"data\": associated_genes}\n with open(file_path, 'w') as f:\n f.write(json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"Saved associated genes for {}\".format(identifier))",
"def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"[email protected]\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()",
"def get_desired_gene(j, k, l):\n unwanted_gene_list = j\n full_gene_file = open(k)\n desired_genes = open(l, 'wt')\n counter = 0\n\n for gene in full_gene_file:\n with open(unwanted_gene_list) as j:\n if gene not in j.read():\n desired_genes.write(gene)\n else:\n counter += 1\n\n\n print(\"Filtered sequences: \" + str(counter))\n full_gene_file.close()\n desired_genes.close()"
]
| [
"0.6505155",
"0.64799863",
"0.6442034",
"0.64137423",
"0.6358331",
"0.6319198",
"0.6303898",
"0.62977004",
"0.62914544",
"0.62514764",
"0.6224801",
"0.6154694",
"0.6145755",
"0.613941",
"0.61073935",
"0.6035381",
"0.60235345",
"0.5973374",
"0.59587014",
"0.59146833",
"0.58895403",
"0.58713186",
"0.5856842",
"0.58069193",
"0.5803432",
"0.5797741",
"0.5794411",
"0.5787395",
"0.57706124",
"0.57361984"
]
| 0.6875583 | 0 |
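The `get_genes` document above scans a whitespace-delimited table, starts collecting after a line beginning with `name`, keeps the first column as the gene identifier, trims `gene_list[1:-7]`, and writes one gene per line. A sketch of the same pattern with the trim made explicit follows; the assumed file layout and the number of header/footer rows to drop are carried over from the original, not verified.

```python
def get_genes(infile, outfile, drop_head=1, drop_tail=7):
    # Collect the first column of every data line that follows the 'name' header line.
    genes = []
    with open(infile) as handle:
        in_data = False
        for line in handle:
            if line.startswith('name'):
                in_data = True
                continue
            if in_data and line.split():
                genes.append(line.split()[0])
    # Mirror gene_list[1:-7] from the original: drop an assumed sub-header and footer rows.
    genes = genes[drop_head:-drop_tail or None]
    with open(outfile, 'w') as out:
        out.write('\n'.join(genes) + '\n')
    return True
```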
Open a square. The square is added to `self.opened`. If you survive, the number of mines around xy is published in `self.mines_near[xy]`. If you die, the square is also added to `self.flagged`, and `self.mines_near[xy]` is set to 'mine' instead of a number. | def open(self, xy):
if xy in self.opened:
return
self.opened.add(xy)
if xy in self._mines:
self.mines_near[xy] = 'mine'
self.flag(xy) # simplifies playing after death logic
self.lose()
else:
self.mines_near[xy] = len(self.neighbours[xy] & self._mines)
self.flagged.discard(xy)
self.empty_remaining -= 1
if self.empty_remaining <= 0:
self.win() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_tile(self, y, x):\n # Find the letter index and convert into a y-coordinate.\n # Checks if it is a mine\n if [y, x] in self.mine_locations:\n # explode\n self.show_answer_board([y, x])\n print \"Boomz.\"\n return Minesweeper.IS_A_BOMB\n else:\n # strip(?)tease to the user (oh damn sexy numbers)\n self.tease_user(y, x)\n return Minesweeper.NOT_A_BOMB",
"def special_open_neighbours(self, y, x):\n if self.table_state[y][x] != \"-\" and self.table_state[y][x] == self.flags_nearby(y, x):\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y: # do not open out of bounds\n continue\n # if it is a bomb but not flagged\n if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:\n self.show_answer_board([ye, xe])\n print \"KABOOM!\"\n return Minesweeper.IS_A_BOMB\n self.open_neighbours(y, x)\n self.print_table(self.table_state)\n return Minesweeper.NOT_A_BOMB",
"def open_tile(self, i, j):\n # Checks for invalid moves.\n if self.game_lost or not self.valid_tile(i, j):\n return []\n if self.tiles[i][j].category != Tiles.closed:\n return []\n if self.game_won:\n return []\n\n # Redistributes mine field and numbers tiles for the first move of the game.\n if self.opened == 0:\n self.adjust_minefield(i, j)\n self.enumerate_tiles()\n \n # Counts the number of tiles opened for checking game winning moves.\n self.opened += 1\n\n # Sets the current closed tile equal to the opened board tile.\n self.tiles[i][j] = self.board[i][j]\n\n # Checks for game ending moves.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n if self.tiles[i][j].category == Tiles.mine:\n self.game_lost = True\n\n # Opens adjacent tiles as needed.\n elif self.tiles[i][j].category == Tiles.zero:\n return self.open_adjacents(i, j, [self.tiles[i][j]])\n\n return [self.tiles[i][j]]",
"def open(self):\n self.solenoid.set(self.OPEN)",
"def draw_open(x, y):\n square_pos_x = x * 30\n square_pos_y = (y - 1) * -30\n penup()\n pencolor('#ff9800')\n # Sets the position on the position (15, 25) in the square of size (30,30) and draws a filled circle\n setpos(-500 + square_pos_x + 15, 200 + square_pos_y - 25)\n pendown()\n circle(10)",
"def open(self, i, j):\n if not self.isOpen(i, j):\n # set open to true\n self.arr_open[self._index(i, j)] = True\n # connect to surrounding sites\n [self.qu.union(self._index(i, j), self._index(x[0], x[1]))\n for x in [(i + 1, j), (i - 1, j), (i, j - 1), (i, j + 1)]\n if self.isOpen(x[0], x[1])]",
"def open_neighbours(self, y, x):\n if [y, x] in self.mine_locations:\n return [y, x]\n # generate neighbours with positive indexes\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n # if the indexes are out of the game table, skip\n if xe >= self.x or ye >= self.y:\n continue\n # if the current coordinates are still untouched, update their values\n if self.table_state[ye][xe] == '-':\n self.table_state[ye][xe] = self.final_table[ye][xe]\n # if the coordinate has a value of 0, recursively open it's neighbours.\n if self.final_table[ye][xe] == '0':\n self.open_neighbours(ye, xe)",
"def addMine(self, row, col):\n if not self.isMine(row, col):\n self.board[row, col] = 1\n # Update neighbors array\n for neighborRow, neighborCol in self.getNeighbors(row, col):\n self.mines[neighborRow, neighborCol] += 1",
"def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())",
"def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine",
"def open_adjacents(self, row, col, opened_tile): \n # Iterates through neighboring tiles, only opening closed tiles adjacent to a zero tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.tiles[i][j].category == Tiles.closed):\n self.opened += 1\n self.tiles[i][j] = self.board[i][j]\n opened_tile.append(self.board[i][j])\n\n # Checks for a game winning move while opening adjacent tiles.\n if (self.opened + self.mines) == (self.rows * self.cols):\n self.game_won = True\n\n # If an adjacent tile is zero, recursively opens that tile's adjacent tiles.\n if self.board[i][j].category == Tiles.zero:\n self.open_adjacents(i, j, opened_tile)\n\n return opened_tile",
"def open(self, row, col):\n self._validate_indexes(row, col)\n self._grid[row][col] = True\n site_idx = row * self._n + col\n # connect to left site\n if col > 0 and self.is_open(row, col - 1):\n self._uf.union(site_idx, site_idx - 1)\n # connect to right site\n if col < self._n - 1 and self.is_open(row, col + 1):\n self._uf.union(site_idx, site_idx + 1)\n # connect to upper site\n if row > 0 and self.is_open(row - 1, col):\n self._uf.union(site_idx, (row - 1) * self._n + col)\n # connect to lower site\n if row < self._n - 1 and self.is_open(row + 1, col):\n self._uf.union(site_idx, (row + 1) * self._n + col)",
"def _open(self, row: int, col: int) -> None:\n\n this_space = self._lookup[(row, col)]\n\n # remove this space from unknowns\n if this_space.position in self._unknowns:\n self._unknowns.pop(this_space.position)\n\n # open this space\n n_hinted = open(row, col)\n this_space.hint = str(n_hinted)\n n_marked = sum(\n 1 for neighbor in this_space.neighbors.values() if neighbor and self._lookup[neighbor].hint == 'x')\n this_space.num_undiscovered = n_hinted - n_marked\n\n # open safe neighbors\n if this_space.num_undiscovered == 0:\n safe_by_proxy = {neighbor for neighbor in this_space.neighbors.values() if\n neighbor and self._lookup[neighbor].hint == '?'}\n for pos in safe_by_proxy:\n self._open(*pos)\n\n # remove this space from any zones it was in.\n for tie in this_space.ties:\n for zone in list(self._lookup[tie].zones):\n if this_space.position in zone:\n new_zone = zone - {this_space.position}\n freq = self._lookup[tie].zones.pop(zone)\n if new_zone:\n self._lookup[tie].zones[new_zone] = freq",
"def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty",
"def __init__(self, rows, cols, mines):\n self.rows = rows\n self.cols = cols\n self.mines = mines\n self.opened = 0\n self.game_won = False\n self.game_lost = False\n self.board = self.__init__minefield__()\n self.tiles = self.__init__tiles__()",
"def fire(self, event):\n\n # Unbind left-click to prevent user from bombing\n # multiple locations at once\n self.canvas.tag_unbind('square', '<Button-1>')\n self.canvas.update()\n\n # Get co-ordinates of the square that was clicked\n n = self.canvas.find_closest(event.x, event.y)\n n = int(n[0]) - 1\n try:\n coords = self.canvas.coords(self.squares[n])\n coords[0], coords[1], coords[2], coords[3]\n if self.hit[n] == 5: # Location already bombed\n raise IndexError\n except IndexError:\n self.canvas.tag_bind('square', '<Button-1>', self.fire)\n else:\n self.bomb(n)",
"def place_mines(board_size, num_mines):\n mines_placed = 0\n board = np.zeros((board_size, board_size), dtype=int)\n while mines_placed < num_mines:\n rnd = randint(0, board_size * board_size)\n x = int(rnd / board_size)\n y = int(rnd % board_size)\n if is_valid(x, y):\n if not is_mine(board, x, y):\n board[x, y] = MINE\n mines_placed += 1\n return board",
"def open_position(self, position: int):\n # get row, column, and path to the well\n row_name = self.positions[position]['row']\n col_name = self.positions[position]['col']\n well_path = os.path.join(os.path.join(self.root_path, row_name), col_name)\n\n # check to see if this well exists (row/column)\n if os.path.exists(well_path):\n pos_name = self.positions[position]['name']\n pos_path = os.path.join(well_path, pos_name)\n\n # check to see if the position exists\n if os.path.exists(pos_path):\n\n if self.verbose: print(f'Opening subgroup {row_name}/{col_name}/{pos_name}')\n\n # update trackers to note the current status of the writer\n self.current_pos_group = self.store[row_name][col_name][pos_name]\n self.current_well_group = self.store[row_name][col_name]\n self.current_position = position\n\n else:\n raise FileNotFoundError(f'Could not find zarr position subgroup at {row_name}/{col_name}/{pos_name}\\\n Check spelling or create position subgroup with create_position')\n else:\n raise FileNotFoundError(f'Could not find zarr position subgroup at {row_name}/{col_name}/\\\n Check spelling or create column/position subgroup with create_position')",
"def increment_square(self, row, col):\n if not self.valid_square(row, col):\n return\n square = self.array[row][col]\n if square.type == SquareType.BLANK:\n square.type = SquareType.NUMBER\n square.number += 1",
"def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value",
"def test_is_occupied(self):\n self.assertFalse(self.test_square.is_occupied())\n\n self.test_square.piece = Piece(ChessColor.BLACK)\n self.assertTrue(self.test_square.is_occupied())",
"def set_square(self, x, y, mark):\n if self.board[x][y] == 0:\n self.board[x][y] = mark\n return True\n else:\n return False",
"def update(self, x, y, flag=False):\n p = self.search(x, y)\n\n if flag:\n p.flag()\n return True\n\n if p.is_bomb():\n for row in self.chessboard:\n for val in row:\n val.open()\n return False\n\n if p.open():\n self.open(p)\n return True",
"def flag_cell(self, event):\n if self.mineboard.gamestate is None:\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n self.mineboard.flag_cell(y, x)\n self.update_cells()\n mines_rem = self.mineboard.minecount - self.mineboard.flagcount\n # updates the mines_left label\n if mines_rem == 1:\n self.mines_left.set(f\"{mines_rem} mine left\")\n else:\n self.mines_left.set(f\"{mines_rem} mines left\")",
"def is_space_open(self, location):\n return self.board[location[1]][location[0]] == 0",
"def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board",
"def open_neighbour_cells(self, my_board, x, y):\n for _x in range(x-1, x+2):\n for _y in range(y-1, y+2):\n if is_valid(_x, _y):\n if is_new_move(my_board, _x, _y):\n my_board[_x, _y] = self.count_neighbour_mines(_x, _y)\n if my_board[_x, _y] == 0:\n my_board = self.open_neighbour_cells(my_board, _x, _y)\n return my_board",
"def get_piece_at_opening(self, x, y):\n self._validate_opening(x, y)\n return self._openings[x][y]",
"def mark_mines(self, cells):\r\n for cell in cells:\r\n row, col = cell\r\n self.mine_field[row][col] = 'x'\r\n self.mines_left -= 1\r\n return",
"def refresh(self, xy):\n button = self.squares[xy]\n\n text, fg, bg = self.text_fg_bg(xy)\n button.config(text=text, fg=fg, bg=bg)\n \n if xy in self.opened:\n button.config(relief=Tk.SUNKEN)\n\n if self.empty_remaining > 0:\n self.message(\"%d non-mines left to open\" %\n self.empty_remaining)"
]
| [
"0.6151162",
"0.5784085",
"0.5716945",
"0.5655926",
"0.56013936",
"0.5556093",
"0.5474702",
"0.540533",
"0.5357954",
"0.53536934",
"0.5344168",
"0.5304478",
"0.5276817",
"0.52626586",
"0.5260349",
"0.5220338",
"0.52165616",
"0.52024376",
"0.51827097",
"0.51771474",
"0.5176331",
"0.51672107",
"0.5149549",
"0.5146766",
"0.5140022",
"0.5138811",
"0.5138811",
"0.5132816",
"0.51206386",
"0.5094604"
]
| 0.80960995 | 0 |
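The core of the `open` method above is `len(self.neighbours[xy] & self._mines)`: the number published on a survived square is the size of the intersection between that square's neighbour set and the mine set. A tiny self-contained illustration of that intersection follows; the 3x3 board and mine positions are invented for the example.

```python
def count_adjacent_mines(xy, neighbours, mines):
    # Number displayed on an opened square: neighbours of xy that are mines.
    return len(neighbours[xy] & mines)


# Hypothetical 3x3 board: the centre square touches every other square.
neighbours = {(1, 1): {(0, 0), (0, 1), (0, 2),
                       (1, 0),         (1, 2),
                       (2, 0), (2, 1), (2, 2)}}
mines = {(0, 1), (2, 2)}
assert count_adjacent_mines((1, 1), neighbours, mines) == 2
```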
paste knob_value into new node | def paste_val(node, knob_name, knob_value):
if node.knob(knob_name) is not None:
node.knob(knob_name).setValue(knob_value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addOrUpdateFactor(self, knobFactor: cern.lsa.domain.settings.KnobFactor) -> _AbstractKnobBuilder__T:\n ...",
"def add(self):\r\n value = int(self.value_entry.get())\r\n self.value_entry.delete(0, tk.END)\r\n self.value_entry.focus_force()\r\n\r\n self.root.add_node(value)\r\n self.draw_tree()",
"def read_knob_val(node, knob_name):\n\n try:\n return node[knob_name]\n except KeyError:\n return \"\"\n except NameError:\n return \"\"",
"def addFactor(self, knobFactor: cern.lsa.domain.settings.KnobFactor) -> _AbstractKnobBuilder__T:\n ...",
"def loopnodes(knobs={}):\n \n for node in nuke.selectedNodes():\n for knob, value in knobs.items():\n try:\n print 'set', node.name(), knob, knobs[knob]\n node[knob].setValue(knobs[knob])\n except:\n print 'FAILED', node.name(), knob, knobs[knob]",
"def put(rbt, key, value):\n try:\n rbt['root'] = insertNode(rbt['root'], key, value, rbt['cmpfunction'])\n rbt['root']['color'] = node.BLACK\n return rbt\n except Exception as exp:\n error.reraise(exp, 'Bst:Put')",
"def __init__(self, name, node, value=None):\n super(InputPlug, self).__init__(name, node, (OutputPlug, ))\n self.value = value\n self.is_dirty = True\n self.node.inputs[self.name] = self",
"def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value",
"def updateFactor(self, knobFactor: cern.lsa.domain.settings.KnobFactor) -> _AbstractKnobBuilder__T:\n ...",
"def build(self) -> cern.lsa.domain.settings.Knob:\n ...",
"def changeValue(self):\n #productive #onUpDnArrow\n profprint()\n widget = slicer.modules.NeedleFinderWidget\n widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value)+ ' (pt: '+str(self.ptNumber)+')')",
"def set(self, node, value):\n self.val[node] = value",
"def changeValue(self):\r\n # productive #onUpDnArrow\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n # widget.scrollPointButton.setText('Scroll Point for Needle ' + str(widget.editNeedleTxtBox.value) + ' (pt: ' + str(self.ptNumber) + ')')\r\n self.lockControlPoints(widget.editNeedleTxtBox.value)\r\n self.unlockControlPoints(widget.editNeedleTxtBox.value)\r\n widget.drawValidationNeedlesButton.text = \"Render Manual Needle \" + str(widget.editNeedleTxtBox.value)",
"def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N",
"def addKnobChanged(call, args=(), kwargs={}, nodeClass='*', node=None):\n pass",
"def onReadNodeKnobChanged():\n ...",
"def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})",
"def put(self, key, value):\n if key is None:\n return\n self.root = put_in_subtree(self.root, key, value)\n self.root.colour = False # make sure that the root is black",
"def add(self, value):\n self.children.append(Node(value))",
"def on_widget_val_changed(self, val: Data):\n self.update_node_input(val, silent=self._prevent_update.blocked)",
"def setValue(self,val):\n self.input.setText(str(val))",
"def addBranch(self, value, node):\n self.branches[value] = node",
"def __init__(self, value, widget: Gtk.Widget, signal: str):\n super().__init__()\n self.value = value\n self.reset_value = value\n self.value_widget = widget\n self.value_signal = signal",
"def insert(self, new_val):\r\n if self.root is None:\r\n self.root = RBTreeNode(new_val, color=BLACK) # root has to be black\r\n else:\r\n self._insert(self.root, new_val)",
"def setValue(self,val):\n val = float(val)\n self.input.setText(str(val))",
"def set_value(self, value):\n if self.notebook:\n self.widget.value = value\n else:\n if hasattr(self.widget, \"setValue\"):\n self.widget.setValue(value)\n elif hasattr(self.widget, \"setCurrentText\"):\n self.widget.setCurrentText(value)\n elif hasattr(self.widget, \"setText\"):\n self.widget.setText(value)",
"def value(self, new_val):\n # TODO: This is a hack, we should have the head/tail be properties with\n # appropriate setter methods in Connector.\n self.head = new_val\n self.head.add_connector(self)\n self.update()",
"def __init__(self, canvas, variable, value):\n Connector.__init__(self, canvas, value, variable)",
"def push(self, value):\r\n new_node = Node(value)\r\n print('pushing value : ' + str(value))\r\n # print (self.head, new_node.get_value())\r\n if self.head:\r\n # tmp_node = self.head\r\n new_node.set_next(self.head) # set new node's next = head node's next\r\n self.head = new_node # assign the new node as the Head node\r\n else:\r\n self.head = new_node",
"def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1"
]
| [
"0.6018957",
"0.5982678",
"0.585561",
"0.57183415",
"0.564189",
"0.5593129",
"0.5554506",
"0.55129737",
"0.5485929",
"0.5439852",
"0.5420697",
"0.5407335",
"0.52589893",
"0.519508",
"0.51812345",
"0.5165599",
"0.51233375",
"0.5108813",
"0.507851",
"0.5036744",
"0.5035296",
"0.5026276",
"0.50205916",
"0.5017339",
"0.50069433",
"0.49938184",
"0.4971659",
"0.49655676",
"0.49595937",
"0.49540037"
]
| 0.825682 | 0 |
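`paste_val` above leans on Nuke's Python API, where `node.knob(name)` returns `None` when the knob does not exist, so the guard lets a paste silently skip nodes that lack the knob. A hedged usage sketch follows; the node and knob names are illustrative, and it assumes it runs inside a Nuke session.

```python
import nuke  # only available inside a Nuke session


def copy_knob(src_node, dst_node, knob_name):
    # Copy one knob value between nodes, tolerating nodes that lack the knob.
    src_knob = src_node.knob(knob_name)
    if src_knob is not None:
        paste_val(dst_node, knob_name, src_knob.value())


# Illustrative call: copy the 'file' knob between two Read nodes by name.
# copy_knob(nuke.toNode('Read1'), nuke.toNode('Read2'), 'file')
```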
read data from given node, create and fill ConvertNode object and return it | def create_convert_node(node):
try:
file = read_knob_val(node, "file").getValue()
first = int(read_knob_val(node, "first").getValue())
last = int(read_knob_val(node, "last").getValue())
first2 = int(read_knob_val(node, "origfirst").getValue())
last2 = int(read_knob_val(node, "origlast").getValue())
format = read_knob_val(node, "format").value()
except Exception, e:
return None
cv = c.ConvertNode(file, first, last, first2, last2, format)
return cv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert(self, node):\n # get the conversion lut\n node_type = self.get_node_type(node)\n conversion_specs = self.conversion_spec_sheet.get(node_type)\n if not conversion_specs:\n print('No conversion_specs for: %s' % node_type)\n return\n\n # call any call_before\n call_before = conversion_specs.get('call_before')\n if call_before and callable(call_before):\n call_before(node)\n\n # some conversion specs doesn't require a new node to be created\n # so return early if this is the case\n if 'node_type' not in conversion_specs:\n return node\n\n node_creator = self.node_creator_factory(conversion_specs)\n rs_node = node_creator.create()\n\n # rename the material to have a similar name with the original\n if rs_node is not None:\n node_type_name = conversion_specs['node_type'] \\\n if isinstance(conversion_specs['node_type'], str) else \\\n conversion_specs['secondary_type'].replace(' ', '_')\n\n self.rename_node(\n rs_node,\n self.get_node_name(node).replace(\n node_type, node_type_name\n )\n )\n else:\n rs_node = node\n\n # set attributes\n attributes = conversion_specs.get('attributes')\n if attributes:\n for source_attr, target_attr in attributes.items():\n # value can be a string\n if isinstance(target_attr, basestring):\n # check incoming connections\n incoming_connections = \\\n self.get_node_inputs(node, source_attr)\n if incoming_connections:\n # connect any textures to the target node\n for input_ in incoming_connections:\n # input_ >> rs_node.attr(target_attr)\n self.connect_attr(\n input_,\n rs_node,\n target_attr\n )\n else:\n # just read and set the value directly\n self.set_attr(\n rs_node,\n target_attr,\n self.get_attr(node, source_attr)\n )\n\n elif isinstance(target_attr, list):\n # or a list\n # where we set multiple attributes in the rs_node to the\n # same value\n # source_attr_value = node.getAttr(source_attr)\n source_attr_value = self.get_attr(node, source_attr)\n for attr in target_attr:\n self.set_attr(rs_node, attr, source_attr_value)\n # for input_ in node.attr(source_attr).inputs(p=1):\n for input_ in self.get_node_inputs(node, source_attr):\n self.connect_attr(input_, rs_node, attr)\n elif isinstance(target_attr, dict):\n # or another dictionary\n # where we have a converter\n source_attr_value = self.get_attr(node, source_attr)\n for attr, converter in target_attr.items():\n if callable(converter):\n try:\n attr_value = converter(source_attr_value)\n except TypeError:\n # it should use two parameters, also include\n # the node itself\n try:\n attr_value = converter(\n source_attr_value,\n node\n )\n except TypeError:\n # so this is the third form that also\n # includes the rs node\n attr_value = converter(\n source_attr_value,\n node,\n rs_node\n )\n else:\n attr_value = converter\n self.set_attr(rs_node, attr, attr_value)\n\n # call any call_after\n call_after = conversion_specs.get('call_after')\n if call_after and callable(call_after):\n call_after(node, rs_node)\n\n return rs_node",
"def convertNode(self, builder, typeName, data):\n\t\tif typeName not in self.nodeTypeMap:\n\t\t\traise Exception('Node type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.nodeTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)",
"def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )",
"def deserialize(self, data: str) -> 'Node':\n if not data:\n return None\n splits = data.split(' ')\n return self._rebuild(deque(splits))",
"def deserialize(self, data):\n data = data.split(\",\")\n # print(data)\n self.idx = 0\n \n def dfs():\n if data[self.idx] == 'N':\n self.idx += 1\n return None\n node = TreeNode(int(data[self.idx]))\n self.idx += 1\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()",
"def nodeReader(node):\n processSiteInfo = {\n 'event-data': processEventData(),\n 'local-stage-out': processLocalStageOut(),\n 'calib-data': processCalibData(),\n 'fallback-stage-out': processFallbackStageOut()\n }\n\n report = {}\n sProcess = processSite(processSiteInfo)\n processor = processNode(sProcess)\n processor.send((report, node))\n\n return report",
"def from_node(node, ignore_defaults=True):\n if isinstance(node, basestring):\n node = hou.node(node)\n\n if isinstance(node, hou.VopNode):\n node = node\n else:\n return None\n\n directive = get_directive_from_nodetype(node.type())\n\n if directive is None:\n return None\n\n dtype = node.type().definition().sections()[\"FunctionName\"].contents()\n\n if directive == \"material\":\n return MaterialNode(node, ignore_defaults)\n elif directive == \"texture\":\n return TextureNode(node, ignore_defaults)\n elif dtype == \"pbrt_spectrum\":\n return SpectrumNode(node)\n return BaseNode(node, ignore_defaults)",
"def deserialize(self, data):\n def build():\n val = array.next()\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = build()\n node.right = build()\n return node\n array = iter(data.split())\n return build()",
"def deserialize(self, data):\n\n def preOrder(it):\n v = next(it)\n if v == 'None':\n return None\n node = TreeNode(int(v))\n node.left = preOrder(it)\n node.right = preOrder(it)\n return node\n\n data = data.split(' ')\n it = iter(data)\n return preOrder(it)",
"def deserialize(self, data):\n self.data = data\n \n if(data[0] == \"X\") :\n return None\n else :\n t = TreeNode(int(self.data[: self.data.find(\",\")]))\n t.left = self.deserialize(self.data[self.data.find(\",\") + 1 :])\n t.right = self.deserialize(self.data[self.data.find(\",\") + 1 :])\n return t",
"def parse_data(node):\n if node['__typename'] == 'Repository':\n return parse_repo_data(node)\n if node['__typename'] == 'Issue' or node['__typename'] == 'PullRequest':\n return parse_issue_data(node)",
"def deserialize(self, data):\n vals = iter(data.split(\",\"))\n\n def to_node():\n c = vals.next()\n if c == '#':\n return None\n else:\n node = TreeNode(int(c))\n node.left = to_node()\n node.right = to_node()\n return node\n\n return to_node()",
"def deserialize(self, data):\n if data == \"\":\n return None\n \n data = data.split(' ')[::-1]\n def build(root, data):\n nodeVal = data.pop()\n if nodeVal == '#':\n return None\n\n root = TreeNode(int(nodeVal))\n root.left = build(root.left, data)\n root.right = build(root.right, data)\n return root \n \n \n return build(None, data)",
"def _read_node(self, offset):\n self.fh.seek(offset)\n node = _unpack_struct_from_file(B_LINK_NODE_V1, self.fh)\n assert node['signature'] == b'TREE'\n assert node['node_type'] == 1\n\n keys = []\n addresses = []\n for _ in range(node['entries_used']):\n chunk_size, filter_mask = struct.unpack('<II', self.fh.read(8))\n fmt = '<' + 'Q' * self.dims\n fmt_size = struct.calcsize(fmt)\n chunk_offset = struct.unpack(fmt, self.fh.read(fmt_size))\n chunk_address = struct.unpack('<Q', self.fh.read(8))[0]\n\n keys.append(OrderedDict((\n ('chunk_size', chunk_size),\n ('filter_mask', filter_mask),\n ('chunk_offset', chunk_offset),\n )))\n addresses.append(chunk_address)\n node['keys'] = keys\n node['addresses'] = addresses\n return node",
"def from_xml_node(cls, xml_node):\n raise NotImplementedError(\"from_xml_node must be implemented by derived classes.\")",
"def get_data(node):\n return node['data']",
"def deserialize(self, data):\n if not data:\n return \n root = TreeNode(data.pop(0))\n \n def helper(node):\n if data and node:\n ltmp = data.pop(0)\n node.left = (TreeNode(ltmp) if not ltmp==None else None)\n if data: \n rtmp = data.pop(0)\n node.right = (TreeNode(rtmp) if not rtmp==None else None) \n helper(node.left)\n helper(node.right)\n \n helper(root)\n return root",
"def deserialize(self, data):\n vals = [val for val in data.split()]\n queue = collections.deque(vals)\n if not queue:\n return\n root = Node(int(queue.popleft()), [])\n\n def helper(node):\n if not queue:\n return\n while queue[0] != '#':\n child = Node(int(queue.popleft()), [])\n node.children.append(child)\n helper(child)\n queue.popleft()\n\n helper(root)\n return root",
"def deserialize(self, data):\n # if data:\n # root = TreeNode(data.pop(0))\n # # root.val = data.pop\n if not data:\n return None\n data = data.split(' ')\n data = iter(data)\n\n def resucsbuild():\n try:\n val = next(data)\n except StopIteration:\n return\n if val == '*':\n return\n else:\n node = TreeNode(int(val))\n node.left = resucsbuild()\n node.right = resucsbuild()\n return node\n\n return resucsbuild()\n\n\n # Your Codec object will be instantiated and called as such:\n # codec = Codec()\n # codec.deserialize(codec.serialize(root))",
"def deserialize(self, data):\n if not data:\n return None\n ls = data.split('~')\n tree = []\n for item in ls:\n if item == 'None':\n tree.append(None)\n else:\n tree.append(int(item))\n\n def generator(ls):\n if not ls:\n return\n r = TreeNode(ls[0])\n q = collections.deque([r])\n tree_len = len(ls)\n cnt = 1\n while cnt < tree_len:\n if not q:\n break\n node = q.popleft()\n if node:\n node.left = TreeNode(ls[cnt]) if ls[cnt] is not None else None\n q.append(node.left)\n if cnt + 1 < tree_len:\n node.right = TreeNode(ls[cnt + 1]) if ls[cnt + 1] is not None else None\n q.append(node.right)\n cnt += 1\n cnt += 1\n return r\n ans = generator(tree)\n return ans",
"def __init__(self, data, node):\n self.data = data\n self.node = node",
"def convertData(data):\n\n return data",
"def deserialize(self, data):\n array = data.split(\",\")\n def dfs(array):\n if len(array) == 0: return None\n first = array.pop(0)\n if first == \"null\": return None\n node = TreeNode(int(first))\n node.left = dfs(array)\n node.right = dfs(array)\n return node\n return dfs(array)",
"def deserialize(self, data: str) -> 'Node':\n if not data:\n return None\n nodes = data.split(\"#\")\n root = Node(int(nodes[0]), [])\n queue = deque([root])\n index = 1\n while queue:\n node = queue.popleft()\n while nodes[index] != SEPERATOR:\n new = Node(int(nodes[index]), [])\n node.children.append(new)\n queue.append(new)\n index += 1\n index += 1\n return root",
"def _read_node(self, offset):\n self.fh.seek(offset)\n node = _unpack_struct_from_file(B_LINK_NODE_V1, self.fh)\n assert node['signature'] == b'TREE'\n\n keys = []\n addresses = []\n for _ in range(node['entries_used']):\n key = struct.unpack('<Q', self.fh.read(8))[0]\n address = struct.unpack('<Q', self.fh.read(8))[0]\n keys.append(key)\n addresses.append(address)\n # N+1 key\n keys.append(struct.unpack('<Q', self.fh.read(8))[0])\n node['keys'] = keys\n node['addresses'] = addresses\n return node",
"def nodeReader(node):\n\n processLfnPfn = {\n 'path-match': processPathMatch(),\n 'protocol': processProtocol(),\n 'result': processResult(),\n 'chain': processChain()\n }\n\n report = {'lfn-to-pfn': [], 'pfn-to-lfn': []}\n processSMT = processSMType(processLfnPfn)\n processor = expandPhEDExNode(processStorageMapping(processSMT))\n processor.send((report, node))\n return report",
"def convert_txt_to_data():\n pass",
"def deserialize(self, data):\n if len(data) == 0:\n return None\n root = TreeNode(data[0])\n root.left = self.deserialize(data[1]) \n root.right = self.deserialize(data[2])\n return root",
"def deserialize(self, data):\n # root = TreeNode(data[0])\n # self.helper(data)\n if not data: return []\n return self.helper(data)",
"def read(node):\n shapes = list()\n\n # -- If we have a transform, add any nurbs curves\n # -- from it\n for shape in node.getShapes():\n if isinstance(shape, pm.nt.NurbsCurve):\n shapes.append(shape)\n\n if not shapes:\n return None\n\n # -- Define out output data. Right now we're only storing\n # -- cv's, but we wrap it in a dict so we can expand it\n # -- later without compatibility issues.\n data = dict(\n node=node.name(),\n curves=list(),\n up_axis=pm.upAxis(q=True, axis=True),\n )\n\n # -- Cycle the shapes and store thm\n for shape in shapes:\n\n node_data = dict(\n cvs=list(),\n form=shape.f.get(),\n degree=shape.degree(),\n knots=shape.getKnots()\n )\n\n # -- Collect the positional data to an accuracy that is\n # -- reasonable.\n for cv in shape.getCVs():\n node_data['cvs'].append(\n [\n round(value, 5)\n for value in cv\n ]\n )\n\n data['curves'].append(node_data)\n\n return data"
]
| [
"0.679477",
"0.6682094",
"0.60348463",
"0.59449285",
"0.59174734",
"0.5899135",
"0.58760214",
"0.58396685",
"0.5806423",
"0.57982457",
"0.5761724",
"0.5697374",
"0.56820565",
"0.56805956",
"0.56773216",
"0.5675179",
"0.5658774",
"0.5595692",
"0.55716044",
"0.55578136",
"0.5555727",
"0.5554043",
"0.5547451",
"0.5523932",
"0.5517092",
"0.5516377",
"0.54956806",
"0.5475586",
"0.54705435",
"0.54561204"
]
| 0.70612025 | 0 |
try to read the knob value; if it doesn't exist, return an empty string | def read_knob_val(node, knob_name):
try:
return node[knob_name]
except KeyError:
return ""
except NameError:
return "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue",
"def get_value(default):\n output(\" [\" + default + \"]: \")\n response = read_chomped_line()\n if response == \"\":\n return default\n else:\n return response",
"def getStrNo(self, key):\n value = self.getConf(key);\n if value == \"no\":\n return None\n else:\n return value",
"def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None",
"def get_nick_or_channel_value(self, name, key, default=None):\n name = Identifier(name)\n if name.is_nick():\n return self.get_nick_value(name, key, default)\n else:\n return self.get_channel_value(name, key, default)",
"def get_config_value(keyword):\n if g_configs and keyword in g_configs:\n return g_configs[keyword]\n return \"\"",
"def value(self):\n for rb in self.rb:\n# if rb.isChecked():\n if rb.isDown():\n return str(rb.text())\n return ''",
"def _get_clipping_value(clipping, key):\n\tif key in clipping:\n\t\treturn clipping[key]\n\treturn \"N/A\"",
"def get_value(self, key: str) -> Optional[str]:\n raise NotImplementedError",
"def getvalue(self,num,name):\n return self.M.conf(num)[name]",
"def get_for_print(cls, name):\n option = cls.get(name)\n if len(option) == 0:\n return \"<empty>\"\n else:\n return option",
"def get_board_property(key):\r\n with open('/etc/lsb-release') as f:\r\n pattern = '%s=(.*)' % key\r\n pat = re.search(pattern, f.read())\r\n if pat:\r\n return pat.group(1)\r\n return ''",
"def paste_val(node, knob_name, knob_value):\n\n if node.knob(knob_name) is not None:\n node.knob(knob_name).setValue(knob_value)",
"def get_value(self):\n if self.notebook:\n return self.widget.value\n else:\n if hasattr(self.widget, \"value\"):\n return self.widget.value()\n elif hasattr(self.widget, \"currentText\"):\n return self.widget.currentText()\n elif hasattr(self.widget, \"text\"):\n return self.widget.text()",
"def get(self):\n # We use here the fact, that when used in a widget, the value will be\n # retrieved directly instead through .get(). Thus the widget will always \"see\" the str representation.\n value = self._tk.globalgetvar(self._name)\n try:\n value = self.convert(value)\n except Exception as e:\n value = Invalid\n if self._validated_hook:\n self._validated_hook(value is not Invalid)\n return value",
"def read():\n return mac_slideshow.preferences.read(KEY)",
"def get_value(self, device_name):\n return epics.caget(str(device_name))",
"def parse_value(self, value_name, default=None):\n\t\treturn self.cfg_root.find(value_name).text",
"def get_control_value(self, setting_name):\n control_widget, property, setting_type = self.get_control_widget(setting_name)\n ret = getattr(control_widget, property) if control_widget else None\n if ret is not None and control_widget == \"filetypes\": ret = ret.split(',')\n return ret",
"def sensor_value(self) -> Optional[str]:\n raise NotImplementedError()",
"def get_volume(self):\n import fcntl\n import struct\n knob = bytearray(struct.pack(\"III\", 0, 0, 0)) # VOLUME_DEVICE_ID, VOLUME_KNOB_ID, <Unused>\n try:\n fcntl.ioctl(self.mixer_fd, 2, knob, True)\n _,_,value = struct.unpack(\"III\", knob)\n return value\n except:\n return 0",
"def get(self, key):\n return \"\"",
"def _get_field(self, section, field):\n if not self._configparser.has_option(section, field):\n return None\n return self._configparser.get(section, field).strip()",
"def get_control_widget(self, setting_name):\n # The setting (from file) may not have a corresponding widget on the Settings screen\n if setting_name not in self.controls:\n return None, None, None\n control_widget = self.controls[setting_name]\n if hasattr(control_widget, 'active'):\n return control_widget, 'active', bool\n elif hasattr(control_widget, 'state') and not isinstance(control_widget, MDDropDownItem) and not \\\n isinstance(control_widget, Spinner):\n return control_widget, 'state', str\n elif hasattr(control_widget, 'text'):\n return control_widget, 'text', str\n if hasattr(control_widget, 'path'):\n return control_widget, 'path', str\n else:\n logger.warning(f'Could not detect type for {control_widget}')",
"def get_value(self):\r\n return input(\"Enter your choice :\")",
"def _getTextForRequiredObject(self, obj):\n\n if not settings.presentRequiredState:\n return None\n\n state = obj.getState()\n if state.contains(pyatspi.STATE_REQUIRED):\n return settings.brailleRequiredStateString\n else:\n return None",
"def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except configparser.NoOptionError:\n return",
"def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except (configparser.NoOptionError, configparser.NoSectionError):\n return",
"def ini_get(interp, vname):\n w_value = interp.config.get_ini_w(vname)\n if w_value is None:\n return interp.space.w_False\n return interp.space.as_string(w_value)",
"def guiField(self, value):\n return None"
]
| [
"0.59298456",
"0.57521105",
"0.56507623",
"0.55940926",
"0.55683196",
"0.55596906",
"0.554398",
"0.5501618",
"0.545086",
"0.5449694",
"0.5449388",
"0.5442163",
"0.5379863",
"0.536139",
"0.5344755",
"0.5336155",
"0.532224",
"0.5309412",
"0.53077966",
"0.530188",
"0.5264041",
"0.5262607",
"0.52518296",
"0.52453077",
"0.5240704",
"0.5231341",
"0.52247196",
"0.51812387",
"0.5170871",
"0.5170837"
]
| 0.8030681 | 0 |
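
# A minimal usage sketch for the read_knob_val record above (illustrative, not
# from the original source): a plain dict stands in for the node, since a dict
# lookup raises KeyError just like a missing knob would; in a real Nuke session
# the node would come from the nuke API instead, which is what the NameError
# branch guards against.
fake_node = {"label": "beauty_pass"}
assert read_knob_val(fake_node, "label") == "beauty_pass"
assert read_knob_val(fake_node, "missing_knob") == ""
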
proxy(object[, callback]) create a proxy object that weakly references 'object'. 'callback', if given, is called with a reference to the proxy when 'object' is about to be finalized. | def weakref_proxy(*args, **kwargs):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _proxify(func) -> weakref.ProxyType:\r\n _keep_alive.append(func)\r\n return weakref.proxy(func)",
"def fl_set_object_callback(ptr_flobject, pyfn_CallbackPtr, numdata):\n #FL_CALLBACKPTR = cty.CFUNCTYPE(None, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_long)\n _fl_set_object_callback = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_callback\",\\\n xfdata.FL_CALLBACKPTR, [cty.POINTER(xfdata.FL_OBJECT),\n xfdata.FL_CALLBACKPTR, cty.c_long],\n \"\"\"FL_CALLBACKPTR fl_set_object_callback(FL_OBJECT * obj,\\\n FL_CALLBACKPTR callback, long int argument)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n l_numdata = library.convert_to_longc(numdata)\n library.verify_function_type(pyfn_CallbackPtr)\n cfn_CallbackPtr = xfdata.FL_CALLBACKPTR(pyfn_CallbackPtr)\n library.keep_cfunc_refs(cfn_CallbackPtr, pyfn_CallbackPtr)\n library.keep_elem_refs(ptr_flobject, numdata, l_numdata)\n retval = _fl_set_object_callback(ptr_flobject, cfn_CallbackPtr, \\\n l_numdata)\n return retval",
"def __del__(self):\n # weakref callbacks are rather low level, and working out how to use\n # them correctly requires a bit of head scratching. One must find\n # somewhere to store the weakref till after the referent is dead, and\n # without accidentally keeping the referent alive. Then one must\n # ensure that the callback frees the weakref (without leaving any\n # remnant ref-cycles).\n #\n # When it is an option, using a __del__ method is far less hassle.\n #\n # Source: https://bugs.python.org/issue15528\n self._finalize()",
"def callback(self, obj):\r\n assert self.__obj is None, 'Only one object can be registered.'\r\n assert isinstance(obj, RemoteReference)\r\n\r\n # Store the remote reference\r\n self.__obj = obj\r\n\r\n # inform when the remote reference is disconnected using __disconnected\r\n obj.notifyOnDisconnect(self.__disconnected)\r\n\r\n # Call all remaining remote calls made before the remote reference\r\n # arrived\r\n for pending in self.__pending:\r\n pending.callback(obj)\r\n\r\n self.__pending = None",
"def fl_call_object_callback(ptr_flobject):\n _fl_call_object_callback = library.cfuncproto(\n library.load_so_libforms(), \"fl_call_object_callback\",\\\n None, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"void fl_call_object_callback(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_call_object_callback(ptr_flobject)",
"def __init__(self, obj):\n self._store = {}\n self.obj = weakref.proxy(obj)",
"def dummy_callback(obj):\n pass",
"def make_weak_ref(f):\n return weak_method(f) if hasattr(f, '__self__') else weakref.ref(f)",
"def callback(self, callback, *args, **kwds):\n\n def _exit_wrapper(exc_type, exc, tb):\n callback(*args, **kwds)\n\n # We changed the signature, so using @wraps is not appropriate, but\n # setting __wrapped__ may still help with introspection\n _exit_wrapper.__wrapped__ = callback\n self.push(_exit_wrapper)\n return callback # Allow use as a decorator",
"def fl_set_object_posthandler(ptr_flobject, pyfn_HandlePtr):\n #FL_HANDLEPTR = cty.CFUNCTYPE(cty.c_int, cty.POINTER(xfdata.FL_OBJECT),\n # cty.c_int, xfdata.FL_Coord, xfdata.FL_Coord, cty.c_int, cty.c_void_p)\n _fl_set_object_posthandler = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_posthandler\",\n xfdata.FL_HANDLEPTR, [cty.POINTER(xfdata.FL_OBJECT),\n xfdata.FL_HANDLEPTR],\\\n \"\"\"FL_HANDLEPTR fl_set_object_posthandler(FL_OBJECT * ob,\n FL_HANDLEPTR post)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.verify_function_type(pyfn_HandlePtr)\n cfn_HandlePtr = xfdata.FL_HANDLEPTR(pyfn_HandlePtr)\n library.keep_cfunc_refs(cfn_HandlePtr, pyfn_HandlePtr)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_set_object_posthandler(ptr_flobject, cfn_HandlePtr)\n return retval",
"def __init__(self, obj):\n obj.Proxy = self",
"def __init__(self, obj):\n obj.Proxy = self",
"def fl_free_object(ptr_flobject):\n _fl_free_object = library.cfuncproto(\n library.load_so_libforms(), \"fl_free_object\",\\\n None, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"void fl_free_object(FL_OBJECT * obj)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_free_object(ptr_flobject)",
"def handle_cb(self, obj, cb, kwargs):\n callbacks = kwargs.get(\"callbacks\", {})\n callback_exception_failure = kwargs.get(\"callback_exception_failure\", True)\n\n callback = callbacks.get(cb, None)\n if callback:\n m = \"Running callback function {f} for {n}\".format\n m = m(f=callback, n=cb)\n self.mylog.debug(m)\n try:\n obj = callback(handler=self, obj=obj, kwargs=kwargs)\n except Exception as e:\n if callback_exception_failure:\n raise\n else:\n m = \"Exception occurred in callback function {f} for {n}: {e}\".format\n m = m(f=callback, n=cb, e=e)\n self.mylog.exception(m)\n else:\n m = \"No callback function specified for {n}\".format\n m = m(n=cb)\n self.mylog.debug(m)\n return obj",
"def __new__(cls, receiver, callback=None):\n\n if isinstance(receiver,MethodType):\n try:\n self = ref.__new__(cls,receiver.im_self,callback)\n except TypeError:\n return receiver\n else:\n self.func = receiver.im_func\n return self\n\n if isinstance(receiver,weak_receiver):\n return receiver\n\n try:\n return ref.__new__(cls,receiver,callback)\n except TypeError:\n return receiver",
"def on_reverse(self, callback):\n self._reverse_callback = callback if callable(callback) else _void",
"def __call__(self, proxy):\n def _lookup():\n try:\n return getattr(self, proxy)\n except AttributeError:\n raise UnboundProxyError(\"object '%s' unbound\" % proxy)\n return Proxy(_lookup)",
"def set_finish_callback( callback ):",
"def set_finish_callback( callback ):",
"def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)",
"def fl_deactivate_object(ptr_flobject):\n _fl_deactivate_object = library.cfuncproto(\n library.load_so_libforms(), \"fl_deactivate_object\",\\\n None, [cty.POINTER(xfdata.FL_OBJECT)],\\\n \"\"\"void fl_deactivate_object(FL_OBJECT * ob)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n _fl_deactivate_object(ptr_flobject)",
"def __destroy(self):\r\n m = 'Referenced object {0} dead.'.format(self.__class__.__name__)\r\n self.__notify(Failure(DeadReferenceError(m)))\r\n\r\n # Destroy object on the remote side. Takes care if it's already\r\n # destroyed.\r\n if self.__obj:\r\n def eb(failure):\r\n from twisted.spread.pb import PBConnectionLost #@Reimport\r\n if not failure.check(PBConnectionLost):\r\n log.err(failure)\r\n\r\n try:\r\n self.__obj.callRemote('destroy').addErrback(eb)\r\n except DeadReferenceError, PBConnectionLost:\r\n pass\r\n\r\n self.__obj = None",
"def callbackFor(callable, argIndex=-1):\n def addClosure(function):\n closure = _makeClosure(function, callable, argIndex)\n function.pyobjc_closure = closure\n return function\n\n return addClosure",
"def on_del(self, callback):\n self._del_callback = callback if callable(callback) else _void",
"def on_del(self, callback):\n self._del_callback = callback if callable(callback) else _void",
"def forward_callbacks(self, obj):\n obj.add_change_callback(self._forward_cb)",
"def wrapper(coro: CoroutineFunction) -> NoReturn:\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError('Callback function must be coroutine function')\n self._callback = coro\n return coro",
"def proxydescriptor(self, value: ProxyDescriptor):\n self._proxydescriptor = value",
"def set_callback(self, callback):\n if not callable(callback):\n raise ValueError(\"`callback` must be callable.\")\n self._callback_end = callback",
"def register_proxy(self, proxy):\n self.__proxy = proxy"
]
| [
"0.56864166",
"0.5649651",
"0.5597413",
"0.5393539",
"0.51406497",
"0.5135785",
"0.5061495",
"0.49494594",
"0.48840693",
"0.48092043",
"0.47971678",
"0.47971678",
"0.47801232",
"0.4731322",
"0.472666",
"0.46767485",
"0.46504003",
"0.46494353",
"0.46494353",
"0.45718247",
"0.45093906",
"0.44928083",
"0.44907454",
"0.44831643",
"0.44831643",
"0.44545236",
"0.445375",
"0.4450446",
"0.44352788",
"0.4426328"
]
| 0.6606573 | 1 |
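
# The weakref_proxy document above is only a stub; below is a hedged sketch of
# the standard-library weakref.proxy behaviour it documents (Target and
# on_finalize are illustrative names, not from the original source).
import weakref

class Target(object):
    pass

def on_finalize(proxy_ref):
    # Called with the proxy itself just before the referent is finalized.
    print("referent about to be finalized")

obj = Target()
obj.value = 42
p = weakref.proxy(obj, on_finalize)
print(p.value)  # attribute access is forwarded to the live referent -> 42
del obj         # last strong reference gone; on CPython the callback fires here
# Using the proxy after this point raises ReferenceError, since the referent is dead.
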
Need to pass a list of Card objects in. The deck should also know what game it's for. That information belongs with the deck since a deck will be made up of cards from just one game. Also, a deck for a given game should be able to tell you whether it's a legal deck for a given format in that game; however, this is for subclasses to implement. | def __init__(self, decklist):
self.decklist = decklist
# Since Card implements __str__(), `print self.decklist` is a
# good-enough string representation of the deck. Note that decks
# should assert that count > 0 for all cards in the full deck. What
# kind of validation do we want to do?
self.short_game_name = None
self.game_max_copies = 0
self.game_max_copies_exempt = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.deck = []\n for n in range(1, 14):\n card1 = Card(n, \"diamond\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"spade\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"heart\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"club\")\n self.deck.append(card1)",
"def __init__(self):\n \n self.deck = [Card(suit,rank) for suit in SUITS for rank in RANKS]",
"def get_deck():\n deck = []\n for suit in Suit:\n for rank in Rank:\n deck.append(Card(suit, rank))\n return deck",
"def generate_deck(self):\n\t\tsuits = [\"hearts\", \"spades\",\"diamonds\",\"clubs\"]\n\t\tcards = []\n\n\t\tfor suit in suits:\n\t\t\tif self.ace_as_eleven:\n\t\t\t\tace = Card(\"Ace\", 11, suit)\n\t\t\telse:\n\t\t\t\tace = Card(\"Ace\", 1, suit)\n\t\t\tcards.append(ace)\n\n\t\t\ttwo = Card(\"Two\", 2, suit)\n\t\t\tcards.append(two)\n\t\t\t\n\t\t\tthree = Card(\"Three\", 3, suit)\n\t\t\tcards.append(three)\n\n\t\t\tfour = Card(\"Four\", 4, suit)\n\t\t\tcards.append(four)\n\n\t\t\tfive = Card(\"Five\", 5, suit)\n\t\t\tcards.append(five)\n\n\t\t\tsix = Card(\"Six\", 6, suit)\n\t\t\tcards.append(six)\n\n\t\t\tseven = Card(\"Seven\", 7, suit)\n\t\t\tcards.append(seven)\n\n\t\t\teight = Card(\"Eight\", 8, suit)\n\t\t\tcards.append(eight)\n\n\t\t\tnine = Card(\"Nine\", 9, suit)\n\t\t\tcards.append(nine)\n\n\t\t\tten = Card(\"Ten\", 10, suit)\n\t\t\tcards.append(ten)\n\n\t\t\tjack = Card(\"Jack\", 10, suit)\n\t\t\tcards.append(jack)\n\n\t\t\tqueen = Card(\"Queen\", 10, suit)\n\t\t\tcards.append(queen)\n\n\t\t\tking = Card(\"King\", 10, suit)\n\t\t\tcards.append(king)\n\n\t\treturn cards",
"def __init__(self):\n self.deck_of_cards = [Card(value[j], suit[i])\\\n for i in range(len(suit))\\\n for j in range(len(value))]",
"def __init__(self):\n self.deck = []\n\n for i in SUITS:\n for j in RANKS:\n self.deck.append(Card(i, j))",
"def test_deck_contains_all_cards(self):\n\n # I'm using collections.Counter so that the order is ignored (as in a\n # set) but that multiples are accounted for.\n expected = collections.Counter([\n ('r', 'i'), ('r', 'i'), ('r', 'i'),\n ('r', 2), ('r', 3), ('r', 4), ('r', 5), ('r', 6), \n ('r', 7), ('r', 8), ('r', 9), ('r', 10),\n\n ('g', 'i'), ('g', 'i'), ('g', 'i'),\n ('g', 2), ('g', 3), ('g', 4), ('g', 5), ('g', 6),\n ('g', 7), ('g', 8), ('g', 9), ('g', 10),\n\n ('b', 'i'), ('b', 'i'), ('b', 'i'),\n ('b', 2), ('b', 3), ('b', 4), ('b', 5), ('b', 6),\n ('b', 7), ('b', 8), ('b', 9), ('b', 10),\n\n ('y', 'i'), ('y', 'i'), ('y', 'i'),\n ('y', 2), ('y', 3), ('y', 4), ('y', 5), ('y', 6),\n ('y', 7), ('y', 8), ('y', 9), ('y', 10),\n\n ('w', 'i'), ('w', 'i'), ('w', 'i'),\n ('w', 2), ('w', 3), ('w', 4), ('w', 5), ('w', 6),\n ('w', 7), ('w', 8), ('w', 9), ('w', 10), ])\n\n self.assertEqual(expected, collections.Counter(deck.deck_gen()))",
"def cards(\n self,\n cards: Union[List[Tuple[int, str, str]], List[Any]]\n ) -> None:\n self._cards: List[List[Tuple[int, str, str]]] = [cards]",
"def create_deck(self):\n\n deck = []\n\n # Suits and face values\n suits = ['Clubs', 'Diamonds', 'Hearts', 'Spades']\n face_values = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n # Creating deck\n for suit in suits:\n for value in face_values:\n deck.append(Card(suit[0], value))\n\n # Adding jokers\n if self.jokers:\n deck.append(Card('Jk', 0))\n deck.append(Card('Jk', 0))\n\n return deck",
"def create_deck():\r\n deck = []\r\n faces = [2,3,4,5,6,7,8,9,10,\r\n 'Jack','Queen','King','Ace']\r\n suits = ['Spades', 'Diamonds', 'Clubs', 'Hearts']\r\n for face in faces:\r\n for suit in suits:\r\n # Creates a card-tuple and adds it to the deck.\r\n deck.append((face, suit))\r\n \r\n return deck",
"def create_deck(number = 1):\n deck = []\n for suit, face in itertools.product(suit_names, face_names):\n if face == \"Ace\":\n value = 11\n elif face in ['Jack', 'Queen', 'King']:\n value = 10\n else:\n value = int(face)\n img = Image(img_path+suit+\"_\"+face + \".png\")\n state = True\n card = Card(suit, face, value, img, state)\n deck.append(card)\n random.shuffle(deck)\n return deck",
"def __init__(self, deckid, playerclass, decklist):\n self.deckid = int(deckid)\n self.playerclass = str(playerclass)\n if decklist is not None:\n self.decklist = decklist\n else:\n self.decklist = []",
"def __init__(self):\n self.deckcards = []\n for suit_by_number in range(4):\n for rank_by_number in range(1, 14):\n card = card_create.Createcard(suit_by_number, rank_by_number)\n self.deckcards.append(card)",
"def __init__(self):\r\n self.__suit_dict = [{\"Diamonds\": 1}, {\"Spades\": 2}, {\"Harts\": 3}, {\"Clubs\": 4}]\r\n self.cards_list = []\r\n for suit in self.__suit_dict:\r\n for value in range(1, 14):\r\n self.cards_list.append(Card(suit, value))\r\n self.Shuffle()",
"def __init__(self):\n self.cards = [Card(face=card[0], value=card[1], suit=suit)\n for card in CARD_VALUES().items() for suit in CARD_SUITS()]",
"def __init__(self, cards_on_table=None, cards=[], hands_list=[]):\n self.cards = cards\n self.cards_on_table = cards_on_table\n self.hands_list = hands_list",
"def __init__(self):\r\n \r\n self.deck_of_cards= deque([(y,x) for x in range(1,14) for y in Cards.shades])",
"def get_game_cards(gameId):\n pass",
"def get_card_list(self):\n return self.cards",
"def fill_standard_deck(self):\n for name in [\"ace\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\", \"jack\",\n \"queen\", \"king\"]:\n for suit in [\"hearts\", \"diamonds\", \"spades\", \"clubs\"]:\n self.cards.append(card.Card(name, suit, self.card_values[name]))",
"def makedeck(deck):\r\n #making deck of cards\r\n SUITS = [\"Hearts\",\"Diamonds\",\"Clubs\",\"Spades\"]\r\n VALUES = [\"A\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\"]\r\n for e in SUITS:\r\n for i in VALUES:\r\n card = i+\" \"+e\r\n deck.append(card)",
"def __init__(self):\n suits = [\"hearts\", \"spade\", \"diamond\", \"clubs\"]\n values = ['A','2','3','4','5','6','7','8','9','10','J','Q','K']\n self.cards = []\n for suit in suits:\n for value in values:\n self.cards.append((value, suit))",
"def __init__(self, cards):\n self.cards = cards",
"def __repr__(self):\n return f\"Deck({self.cards})\"",
"def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))",
"def test_cards_get_list(self):\n pass",
"def dealCards(deck, player, numCards):\n print \"dealing %s cards to %s...\" % (numCards, player.name)\n for card in range(numCards):\n card = deck[0]\n deck.pop(0)\n player.cards.append(card)\n print \"added %s card for %s\" % (card, player.name)\n print player.cards",
"def __init__(self, cards = []):\n self.cards=cards",
"def __init__(self):\n self._cards = []\n #Add a single card for each suit and rank\n for suit in Card.SUITS:\n for rank in Card.RANKS:\n c = Card(rank, suit)\n self._cards.append(c)",
"def __init__(self, deck_type=\"standard\"):\n if deck_type == \"standard\":\n self.card_values = {\n \"ace\": 1,\n \"two\": 2,\n \"three\": 3,\n \"four\": 4,\n \"five\": 5,\n \"six\": 6,\n \"seven\": 7,\n \"eight\": 8,\n \"nine\": 9,\n \"ten\": 10,\n \"jack\": 10,\n \"queen\": 10,\n \"king\": 10\n }\n self.cards = []\n self.fill_standard_deck()\n else:\n raise Exception(\"Only standard deck type is supported right now.\")"
]
| [
"0.7067736",
"0.68714076",
"0.681387",
"0.6780888",
"0.6776782",
"0.67323965",
"0.6661562",
"0.66035146",
"0.6578332",
"0.654974",
"0.6543735",
"0.65101725",
"0.64868504",
"0.6482843",
"0.6464824",
"0.6463143",
"0.6441016",
"0.64017296",
"0.6401417",
"0.6359067",
"0.6344033",
"0.6330611",
"0.6322782",
"0.6322062",
"0.6309971",
"0.6304996",
"0.6280504",
"0.62475294",
"0.6241931",
"0.6236419"
]
| 0.729421 | 0 |
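
# A hedged sketch of the subclassing idea described in the Deck record above.
# It assumes the enclosing class is named Deck and that Card objects expose
# name and count (as the question-generation code in the most_likely_top_card
# record below does). SixtyCardDeck, is_legal and the 60-card / 4-copy numbers
# are illustrative, not from the original project.
class SixtyCardDeck(Deck):
    def __init__(self, decklist):
        Deck.__init__(self, decklist)
        self.short_game_name = "mtg"
        self.game_max_copies = 4
        self.game_max_copies_exempt = ["Island", "Forest", "Plains", "Swamp", "Mountain"]

    def is_legal(self):
        # Legal here means: at least 60 cards in total, and no more than
        # game_max_copies of any card unless that card is exempt (basic lands).
        total = sum(card.count for card in self.decklist)
        within_limits = all(card.count <= self.game_max_copies or
                            card.name in self.game_max_copies_exempt
                            for card in self.decklist)
        return total >= 60 and within_limits
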
A medium-difficulty question removes 10-20 random cards from your deck, then asks which of four options the top card of your deck is most likely to be. Should be useful to most games, and it's easy to implement in a game-agnostic way, so it's here instead of in game-specific code. | def most_likely_top_card(self, deck):
question_string = "If {}have been removed from your deck, which of the following cards is most likely to be the top card of your deck?"
answer_suffix = "is most likely to be the top card"
reduced_deck = copy.deepcopy(deck)
cards_to_remove = random.choice(range(10,21))
print "Chose to remove {} cards".format(cards_to_remove)
removed_cards = {}
while sum([ removed_cards[key] for key in removed_cards.keys() ]) < cards_to_remove:
drawn_card = random.choice([ card for card in reduced_deck.decklist if card.count > 1 ])
print "Removed a copy of {} from the deck.".format(drawn_card.name)
drawn_card.count -= 1
print "{} cards remain in the deck.".format(sum([ card.count for card in reduced_deck.decklist ]))
# if the card is in the group we've already removed, just
# increment, otherwise add to that set.
if drawn_card.name in [ c for c in removed_cards.keys()]:
removed_cards[drawn_card.name] += 1
else:
removed_cards[drawn_card.name] = 1
print "Removed: {}".format(removed_cards)
reduced_deck_size = sum([ card.count for card in reduced_deck.decklist ])
removed_cards_string = ""
for key in removed_cards.keys():
c = removed_cards.pop(key)
copy_plural = "copies" if c > 1 else "copy"
if len(removed_cards.keys()) == 0:
removed_cards_string += "and "
removed_cards_string += "{0} {1} of {2}, ".format(c, copy_plural, key)
print removed_cards_string
print question_string.format(removed_cards_string)
choices = 4
chosen_cards = []
print "The deck is now: {}".format(reduced_deck.decklist)
while len(chosen_cards) < choices:
this_card = random.choice(reduced_deck.decklist)
print "Chose: {}".format(this_card.name)
# Second test is so that we don't have to deal with ties - however,
# now we just have to make sure that there are at least 4 different
# card counts remaining in the deck, which should be the norm - but
# it's not guaranteed!
if (this_card not in chosen_cards) and (this_card.count not in [ card.count for card in chosen_cards ]):
chosen_cards.append(this_card)
print "List now contains: {}".format([ card.name for card in chosen_cards ])
card_odds_pairings = []
for card in chosen_cards:
# top_card_odds = hypergeom.sf(1, reduced_deck_size, card.count, 1)
top_card_odds = card.count / float(reduced_deck_size)
card_odds_pairings.append((top_card_odds, card.name))
sorted_odds_pairings = sorted(card_odds_pairings, key=operator.itemgetter(0))
print "Cards with odds: {}".format(sorted_odds_pairings)
question_string = question_string.format(removed_cards_string)
correct = sorted_odds_pairings[-1][1]
# No need to shuffle: this list is already in a random order!
possible = [ card[1] for card in card_odds_pairings]
return question_string, correct, possible, answer_suffix, "the top card of your deck" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def choose_hand(hand, deck):\n possible = list()\n for c in combinations(hand, 4):\n possible.append([Cribbage.expected_score(list(c), deck), c])\n best = max(possible, key = lambda i : i[0])\n discard = list(set(hand) - set(best[1]))\n return best[1], discard",
"def test_seven_cards_poker(self):\n self.assertEqual(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()),\n ('6C', '7C', '8C', '9C', 'TC'))\n self.assertEqual(best_hand(\"TD TC TH 7C 7D 8C 8S\".split()),\n ('TD', 'TC', 'TH', '8C', '8S'))\n self.assertEqual(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()),\n ('JD', '7C', '7D', '7S', '7H'))",
"def defense_options(attack, hand, dank):\n options = []\n low = RANK_NUM[attack.rank]\n if attack.suit == dank:\n for card in hand:\n rank = RANK_NUM[card.rank]\n if card.suit == dank and rank > low:\n options.append(card)\n\n else:\n for card in hand:\n rank = RANK_NUM[card.rank]\n if (card.suit == attack.suit and rank > low) or card.suit == dank:\n options.append(card)\n return options",
"def magician(*cards, n=1):\n # Obviously not a random card, put your code here instead.\n from random import choice\n deck = [f'{r} {s}' for r in RANKS for s in SUITS]\n for card in cards:\n deck.remove(card)\n return choice(deck)",
"def best_hand(cards):\n\n\tvalues = [card[0:-1] for card in cards]\n\tsuits = [card[-1] for card in cards]\n\n\t# Dictionary for converting card strings to numbers\n\tcardNums = {\"A\":14, \"K\":13, \"Q\":12, \"J\":11, \"10\":10, \"9\":9, \"8\":8, \\\n\t\t\t\"7\":7, \"6\":6, \"5\":5, \"4\":4, \"3\":3, \"2\":2}\n\n\t# Convert card values to real numbers\n\tunsortedValues = [cardNums[value] for value in values]\n\t# unsorted values is necessary for retrieving card + suit\n\t# later\n\tvalues = unsortedValues [:] # make a copy of list\n\tvalues.sort() \t\t# sort values \n\tvalues.reverse()\t# largest # first \n\n\t### Check for possible hands\n\n\n\t# prepare variables for holding potential hands\n\tfourkind = []\n\tflush = [] \t# stores the suit of the flush\n\tstraight = [] \t# stores the highest number of straight \n\tthreekind = [] # stores the best possible 3-of-a-kind \n\tpairs = [] \t# stores one number for each pair\n\n\t# prepare counters for tracking possible hands\n\tstraightCounter = 1 # always have a straight of 1\n\t\n\t# Check for flush\n\tfor suit in suits:\n\t\tif suits.count(suit) >= 5:\n\t\t\tflush = suit\t\n\t\t\tbreak\n\n\t# check for straight, 4-kind, 3-kind, pairs\n\tfor i in range(6): # Don't process the last card\n\n\t\t# Check for straight if still possible\n\t\tif len(straight) == 0:\n\t\t\tprint \"values = \" + str(values)\n\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,5)]\t\n\t\t\tprint \"straightSeq = \" + str(straightSeq)\n\t\t\tif straightSeq.count(True) == 4:\n\t\t\t\tstraight.append(values[i])\t\n\n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif values[i] == 5:\n\t\t\t\t# check for 4-2-3 first\n\t\t\t\tstraightSeq = [values.count(values[i]-j) >= 1 for j in range(1,4)]\t\n\t\t\t\t# check for Ace\n\t\t\t\tif straightSeq.count(True) == 3 and \\\n\t\t\t\t\tvalues.count(cardNums[\"A\"]) >= 1:\n\t\t\t\t\tstraight.append(values[i])\t\n\n\t\t# Check for 4-kind\n\t\tif len(fourkind) == 0 and values.count(values[i]) == 4:\n\t\t\tfourkind = [values[i]]\n\t\t# Check for 3-kind but don't add same one twice \n\t\telif values.count(values[i]) == 3 and \\\n\t\t\tthreekind.count(values[i]) == 0:\t\n\t\t\tif len(threekind) == 0:\n\t\t\t\tthreekind.append(values[i])\n\t\t\telse: # add to pairs\n\t\t\t\tpairs.append(values[i])\n\t\t# Check for pairs, don't add same pair twice\n\t\telif values.count(values[i]) == 2 and \\\n\t\t\tpairs.count(values[i]) == 0: \n\t\t\tpairs.append(values[i])\n\n\t\n\n\t### Determine hand strength based on found hands\n\t# Since values are separated from suits, have to iterate\n\t# through unsorted values to find correct index of each card\n\n\tbesthand = []\n\n\t# Straight flush\n\tif len(straight) != 0 and len(flush) != 0:\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush:\n\t\t\t\tbesthand.append(card)\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif len(besthand) == 5:\n\t\t\treturn (besthand, Ranks.StraightFlush)\n\t\telse: # not a straight flush, so re-init besthand\n\t\t\tbesthand = []\n\n\t# Four of a kind\n\tif len(fourkind) != 0:\n\t\tcardValue = convNumToCard(fourkind[0])\n\t\t# insert the 4 out of 5 cards b/c suit is known\n\t\tbesthand = [cardValue + \"S\", cardValue + \"H\", cardValue + \"C\", cardValue + \"D\"]\n\t\t# add the highest value card that isn't 
4-of-a-kind\n\t\tfor i in range(7):\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != fourkind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FourKind)\n\t# Full House\t\n\telif len(threekind) != 0 and len(pairs) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\t\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.FullHouse)\n\t# Flush\n\telif len(flush) != 0:\n\t\t# iterate through sorted cards, add that card if its\n\t\t# suit matches the flush suit\n\t\tfor i in range(7):\n\t\t\t# find card in original unsorted list\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tif card[-1] == flush[0]:\n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Flush)\n\t# Straight\n\telif len(straight) != 0:\n\n\t\tfor i in range(5): \n\t\t\t# check for 5-4-3-2-A straight\n\t\t\tif i == 4 and straight[0] == cardNums[\"5\"]:\n\t\t\t\tcardIndex = unsortedValues.index(cardNums[\"A\"])\n\t\t\telse:\n\t\t\t\tcardIndex = unsortedValues.index(straight[0] - i)\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\treturn (besthand, Ranks.Straight)\n\t# Three of a kind\n\telif len(threekind) != 0:\n\t\tfor i in range(7): # add 3-kind to besthand\n\t\t\tif unsortedValues[i] == threekind[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 3:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add two high cards to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != threekind[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.ThreeKind)\n\t# Two pair\n\telif len(pairs) == 2:\n\t\tfor i in range(7): # add 1st pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add 2nd pair to besthand\n\t\t\tif unsortedValues[i] == pairs[1]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 4:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0] and values[i] != pairs[1]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.TwoPair)\n\t# Pair\n\telif len(pairs) == 1:\n\t\tfor i in range(7): # add pair to besthand\n\t\t\tif unsortedValues[i] == pairs[0]:\n\t\t\t\tbesthand.append(cards[i])\n\t\t\t\tif len(besthand) == 2:\n\t\t\t\t\tbreak\n\t\tfor i in range(7): # add high card to best hand\n\t\t\t# search sorted list for high card\n\t\t\tif values[i] != pairs[0]:\n\t\t\t\t# find card in original unsorted list\n\t\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\t\tcard = cards[cardIndex] \n\t\t\t\tbesthand.append(card)\n\t\t\t\tif 
len(besthand) == 5:\n\t\t\t\t\tbreak\n\t\treturn (besthand, Ranks.Pair)\n\t# High card\n\telse:\n\t\tfor i in range(7):\n\t\t\tcardIndex = unsortedValues.index(values[i])\n\t\t\tcard = cards[cardIndex] \n\t\t\tbesthand.append(card)\n\t\t\tif len(besthand) == 5:\n\t\t\t\treturn (besthand, Ranks.HighCard)",
"def DealerLogic(hand):\r\n inithand = [0,0,0,0,0]\r\n temphand = [0,0,0,0,0]\r\n for j in range(5):\r\n inithand[j] = hand[j] #just numericalvalues of hand\r\n temphand[j] = hand[j]\r\n possiblecards = []\r\n basesuits = CountSuits(inithand)\r\n\r\n for i in range(5):\r\n for j in range(5):\r\n temphand[j] = inithand[j] #resetting for correct value\r\n temphand[i] = 0 #generic trump value for hand\r\n temphand = sorted(temphand) #putting in ascending order again\r\n temp = CountSuits(temphand)\r\n if temp[4] < basesuits[4]: #if by replacing that card, number of suits decreases \r\n possiblecards.append(i) #save index of card \r\n\r\n if len(possiblecards) == 0: #if can't decrease number of suits, tries to make as close to less suited\r\n if basesuits[4] == 1: #can't make less suited as all one suit already\r\n return max(inithand) #smallest card possible discarded\r\n elif basesuits[4] == 2: #two suited already (2 of 1 suit, 3 of other), can't make less suited\r\n discardsuit = basesuits.index(2) #finds suit that has 2\r\n else: #three suited, can't make less (1 trump, 2 of one, 2 of other)\r\n for i in range(len(OFFSUITS)):\r\n for j in range(len(OFFSUITS[i])):\r\n if OFFSUITS[i][j] in inithand:\r\n return OFFSUITS[i][j] #returning minimum offsuit card\r\n if discardsuit == 1: #discard ss\r\n return inithand[1] \r\n elif discardsuit == 2: #discard os1\r\n if basesuits[1] != 0: #other option is ss\r\n return inithand[4]\r\n else: #other option is os2\r\n return inithand[1]\r\n else: #discard os2\r\n return inithand[4]\r\n elif len(possiblecards) == 1: #if only one card makes less suited\r\n return inithand[possiblecards[0]]\r\n else: #multiple choices on proper discard, discard lowest card\r\n for i in range(len(OFFSUITS)):\r\n for j in range(len(OFFSUITS[i])):\r\n if OFFSUITS[i][j] in inithand:\r\n return OFFSUITS[i][j] #returning minimum offsuit card\r",
"def copies_in_top_five(self, deck):\n question_string = \"After drawing your opening hand with one copy of {card}, how likely is it that another copy of {card} is in the top five cards of your deck?\"\n answer_suffix = 'percent'\n # That's another reason why we don't choose a card earlier: we might be\n # interested in a card with a specific quality.\n chosen_card = random.choice([ card for card in deck.decklist if card.count > 1 ])\n remaining_copies = chosen_card.count - 1\n remaining_deck = sum([c.count for c in deck.decklist]) - 7\n\n in_top_five_chance = hypergeom.sf(1, remaining_deck, remaining_copies, 5)\n in_top_five_chance = in_top_five_chance * 100\n correct_string = \"{:.2f}\".format(in_top_five_chance)\n\n wrongs = self.gen_wrong(in_top_five_chance, 'percent', 4)\n possible = wrongs + [correct_string]\n random.shuffle(possible)\n\n print \"Chance of a copy of {} in the next five cards: {}\".format(chosen_card.name, correct_string)\n return question_string.format(card=chosen_card.name), correct_string, possible, answer_suffix, chosen_card",
"def best_hand(cards):\n return max(generate_all_hands(cards))",
"def create_best_hand_bruteforce(cards):\n \n combos = unique_combinations(cards, 5)\n hands = [Hand(combo) for combo in combos]\n hands = sorted(hands, reverse=True)\n return hands[0]",
"def get_chaser_answer(self, q):\n rand_num = random.randint(1, 4)\n if rand_num <= 3: # 75%\n return q.get_answer() # give right answer\n else: # 15% - give wrong answer\n options = [1, 2, 3, 4] # all option\n options.pop(q.get_answer()-1) # pop right option\n return options[random.randint(0, 2)] # return random wrong option",
"def Deal():\r\n cardsout = []\r\n cardoptions = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\r\n topcardoptions = [0,2,3,4,5,6]\r\n topcard = topcardoptions[random.randint(0,5)]\r\n cardoptions.pop(cardoptions.index(topcard))\r\n cardsout.append(topcard)\r\n\r\n if SHOWHAPPENINGS == True:\r\n disp = card_dict[topcard]\r\n print(\"Topcard is: {}\".format(disp)) \r\n\r\n for i in range(4):\r\n numcards = 0\r\n while numcards < 5:\r\n possiblerange = len(cardoptions) - 1\r\n cardindex = random.randint(0,possiblerange)\r\n card = cardoptions[cardindex]\r\n cardsout.append(card)\r\n cardoptions.pop(cardoptions.index(card))\r\n PlayerHands[i].append(card)\r\n numcards += 1\r\n PlayerHands[i] = sorted(PlayerHands[i]) #putting into ascending order\r\n if i == 0 or i == 2:\r\n PlayerHands[i].append(\"RedTeam\")\r\n else: \r\n PlayerHands[i].append(\"BlackTeam\")\r\n \r\n PlayerHands[0].append(PLAYER1)\r\n PlayerHands[1].append(PLAYER2)\r\n PlayerHands[2].append(PLAYER3)\r\n PlayerHands[3].append(PLAYER4)\r\n #PlayerHand format = [card1,card2,card3,card4,card5,Team,Name]\r\n\r\n return topcard",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def pick_questions(standard_info):\n\n questions = []\n\n # goes through each item in the standard into\n for item in standard_info:\n standard_id = item[0]\n count = item[1]\n options = standards[standard_id][\"Questions\"]\n # if the questions you have allocated to that standard are less than the amount\n # of questions that standard technically has to offer, just add the amount you are able to\n if count < len(options):\n available = options[:count]\n questions.extend(available)\n # alternatively, if the number you have allocated to that standard is exactly equal to the amount\n # of q's that standard has, just \"divide evenly\" and put all the questions in once\n elif count == len(options):\n questions.extend(options)\n # if you have more questions allocated to that standard than you have questions available, duplicate!\n elif len(options) < count:\n if not count % len(options):\n to_add = options * (count / len(options))\n questions.extend(to_add)\n else:\n to_add = options * (count / len(options))\n questions.extend(to_add)\n remainder = count % len(options)\n winners = defaultdict(int)\n while remainder:\n choices = options[:]\n winner = random.choice(choices)\n choices.pop(choices.index(winner))\n winners[winner] += 1\n remainder -= 1\n for q in options:\n print q\n for i in range(1, winners[q] + 1):\n questions.append(q)\n print questions\n\n return questions",
"def pop_ans(self, n_cards):\n\n cards = self.answer_cards[self.used_answers : self.used_answers + n_cards]\n self.used_answers += n_cards\n\n return cards",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 50:\n break\n\n return answers",
"def pick_next_card_query(deck_format, seed=None):\n return random.choice(collect_queries_by_weight(deck_format))",
"def study_deck(path, deck, num=None):\n conn = sqlite3.connect(path)\n with conn:\n if num:\n cards = conn.execute('''SELECT front, back FROM cards\n WHERE deck=(?) limit (?)''', (deck, num)).fetchall()\n else:\n cards = conn.execute('SELECT front, back FROM cards WHERE deck=(?)',\n (deck,)).fetchall()\n random.shuffle(cards)\n for front, back in cards:\n print('Front: %s' % front)\n input('Answer? ')\n print('Back: %s\\n' % back)",
"def simulate(deck): \n \n # Initialize Banker and Player\n # player_third_card is initialized to -10 to signify that it doesn't exist.\n banker = 0\n player = 0\n player_third_card = -10\n \n# Deal out two hands of two cards\n player = (player + deck.pop()) % 10\n player = (player + deck.pop()) % 10\n \n banker = (banker + deck.pop()) % 10\n banker = (banker + deck.pop()) % 10\n \n# Check for natural\n if player >= 8 and banker >= 8:\n return 'tie'\n elif banker >= 8:\n return 'banker'\n elif player >= 8:\n return 'player'\n \n\n# Run through Player hand\n if player <= 5:\n player_third_card = deck.pop()\n player = (player + player_third_card) % 10\n \n\n# Run through Banker hand\n if player_third_card == -10 and banker < 6:\n banker = (banker + deck.pop()) % 10\n elif banker <= 2:\n banker = (banker + deck.pop()) % 10\n elif banker == 3 and player_third_card != 8:\n banker = (banker + deck.pop()) % 10\n elif banker == 4 and player_third_card >= 2 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 5 and player_third_card >= 4 and player_third_card <=7:\n banker = (banker + deck.pop()) % 10\n elif banker == 6 and (player_third_card == 6 or player_third_card == 7):\n banker = (banker + deck.pop()) % 10\n \n \n# Compare hands and return results\n if player > banker:\n return 'player'\n elif banker > player:\n return 'banker'\n else:\n return 'tie'",
"def main_questions(money, grain, people):\n quest_buy = [Q1, Q2, Q3, Q6, Q7]\n question = random.choice(quest_buy)\n print(question)\n answer = input()\n while answer.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer = input()\n answer = int(answer)\n if question == Q1:\n money = money - answer * 12\n elif question == Q2:\n money -= answer * 14\n elif question == Q3:\n money -= answer * 13\n elif question == Q6:\n money -= answer * 10\n elif question == Q7:\n money -= answer * 15\n grain += answer\n\n quest_sell = [Q4, Q5, Q8, Q9, Q10]\n question_2 = random.choice(quest_sell)\n print(question_2)\n answer = input()\n while answer.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer = input()\n answer = int(answer)\n if question == Q4:\n money += answer * 7\n elif question == Q5:\n money += answer * 5\n elif question == Q8:\n money += answer * 6\n elif question == Q9:\n money += answer * 9\n elif question == Q10:\n money += 8\n grain -= answer\n\n print(DISTRIBUTION_OF_GRAIN)\n answer_3 = input()\n while answer_3.isdigit() is False:\n print(INPUT_INT_VALUE)\n answer_3 = input()\n answer_3 = int(answer)\n grain -= answer_3\n if grain / people > 90:\n people *= 1.1\n elif grain / people < 40:\n people *= 0.9\n return int(money), int(grain), int(people)",
"def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. '))\n return chosen_to_discard",
"def choose_deck():\n deck_prompt = \"What deck would you like to play?\"\n choice = utils.choose_list(utils.DECKLIST, deck_prompt)\n deck = dicts.ALL_DECKS[choice]\n os.system('clear')\n return deck",
"def populate_game_questions():\n indices = random.sample(range(0, len(quizquestion.questions_all)), 5) # If user doesn't specify, choose 5 random questions\n return quizquestion.QuizQuestion.get_game_questions(indices)",
"def picksomequestions():\n answers = dict()\n for question in nlist:\n answers[question[0]] = question[1]\n if len(answers.keys()) > 49:\n break\n\n return answers",
"def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))",
"def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()"
]
| [
"0.66956043",
"0.6490272",
"0.6446676",
"0.6376408",
"0.6299008",
"0.6255553",
"0.62223595",
"0.6184848",
"0.6184359",
"0.6096527",
"0.6076049",
"0.60742056",
"0.60742056",
"0.60742056",
"0.60742056",
"0.606422",
"0.5953516",
"0.5947961",
"0.5947961",
"0.5947961",
"0.5944662",
"0.59255016",
"0.5922797",
"0.5906789",
"0.5872767",
"0.5858135",
"0.5832218",
"0.5827984",
"0.5826965",
"0.58181"
]
| 0.70982635 | 0 |
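
# A quick sanity check of the top-card probability used in most_likely_top_card
# above (numbers are illustrative): with the deck reduced to 34 cards and 3
# copies of a card still in it, the chance that it sits on top is simply 3/34.
# For a single draw, scipy's hypergeom.sf(0, 34, 3, 1) gives the same value,
# while the commented-out sf(1, ...) form would be P(X > 1), which is always 0
# when only one card is drawn -- presumably why the plain ratio is used instead.
remaining_copies = 3
reduced_deck_size = 34
top_card_odds = remaining_copies / float(reduced_deck_size)
print("{:.1%}".format(top_card_odds))  # -> 8.8%
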
Return a scoped session (according to SQLAlchemy docs, this always returns the same object within a thread, and a different object in a different thread). Moreover, since we update the scopedsessionclass upon forking, forks also have different session objects. | def get_scoped_session():
if scopedsessionclass is None:
s = None
else:
s = scopedsessionclass()
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_session():\n session = scoped_session(sessionmaker(bind=engine))\n return session",
"def get_session():\n return scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))",
"def get_scoped_session():\n scoped_session = SESSION()\n try:\n yield scoped_session\n scoped_session.commit()\n except Exception as e:\n scoped_session.rollback()\n logging.error(str(e))\n raise\n finally:\n scoped_session.close()",
"def get_session(self):\n # Create the session\n kwargs = self._get_database_specific_session_args()\n session_factory = orm.sessionmaker(\n bind=self._engine, expire_on_commit=False, **kwargs\n )\n session_cls = orm.scoped_session(session_factory)\n\n return session_cls",
"def session(self):\n if not self._session: #Create new session if none exists\n return self._new_session()\n return self._session",
"def get_session(self):\n yield from self._ensure_session_valid()\n return self.session",
"def session(self) -> Session:\n if self._session is None:\n self._session = Session()\n\n return self._session",
"def session(self):\n return self.session_store.get_session(backend=\"datastore\")",
"def get_session(self):\n return self._session()",
"def session(self):\n if self._session is None:\n self.init_session()\n\n return self._session",
"def new_session(self):\n return self._SessionLocal()",
"def get_session(self):\n return ESSession(self)",
"def session(self) -> \"Session\":\n return self._instance",
"def session(self):\n return session",
"def session(self):\n return self.session_store.get_session()",
"def session(self):\n return self.session_store.get_session()",
"def session(self):\n return self.session_store.get_session()",
"def get_session(self):\n session = Session(self.settings)\n self.sessions.append(session)\n return session",
"def session(self):\n\n if not self.is_active:\n raise errors.InactiveTransaction()\n\n return self._orm_session_proxy",
"def get_session() -> requests.Session:\n return _get_session_from_cache(thread_ident=threading.get_ident())",
"def session(self):\n return self.ssession()",
"def get_session(self):\n return self.session",
"def __session(self) -> boto3.Session:\n return self.__ctx.get_session()",
"def single_threaded_session():\n return make_session(num_cpu=1)",
"def create_scoped_session(self):\n self.engine = self.create_engine(models.Base)\n session_factory = sessionmaker(bind=self.engine) \n #class_=Session)\n return scoped_session(session_factory)",
"def session_scope(scoped=True):\n session = ScopedSession() if scoped else DBSession()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n # detach all instances in this session\n # and release connection\n session.close()",
"def session_scope(\n self,\n session=None,\n can_inherit=True,\n must_inherit=False,\n auto_flush=None,\n read_only=None,\n ):\n\n if must_inherit and not self.has_session():\n raise RuntimeError(\n \"Session scope requires it to be wrapped in a pre-existing \"\n \"session. This was likely done to prevent a leaked session \"\n \"from a function which returns a query object.\"\n )\n\n # Set up local session\n inherited_session = True\n if session:\n local = session\n elif not (can_inherit and self.has_session()):\n inherited_session = False\n local = self._new_session(auto_flush, read_only)\n else:\n local = self.current_session()\n\n if inherited_session and (read_only is not None or auto_flush is not None):\n logger.warning(\n \"Attempt to mark an inherited session with read_only={} or auto_flush={} will be ignored.\".format(\n read_only, auto_flush\n )\n )\n\n # Context manager functionality\n try:\n with self.context(session=local):\n yield local\n\n if not inherited_session:\n local.commit()\n\n except Exception as msg:\n logging.error(f\"Rolling back session {msg}\")\n local.rollback()\n raise\n\n finally:\n if not inherited_session:\n local.expunge_all()\n local.close()",
"def session(self):\n # return self.session_store.get_session()\n return self.session_store.get_session(\n name='mc_session',\n factory=sessions_memcache.MemcacheSessionFactory)",
"def session(self):\n return self.__session",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session"
]
| [
"0.7187179",
"0.71814144",
"0.710347",
"0.7078379",
"0.7068919",
"0.6859953",
"0.67099005",
"0.67088383",
"0.6662891",
"0.6648465",
"0.6634474",
"0.6613667",
"0.6585746",
"0.658351",
"0.65781957",
"0.65781957",
"0.6575925",
"0.6549415",
"0.65426534",
"0.65316164",
"0.65285134",
"0.6520213",
"0.64808625",
"0.6466391",
"0.64486974",
"0.6443169",
"0.64314455",
"0.64178383",
"0.6376418",
"0.63710797"
]
| 0.7503224 | 0 |
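
# A hedged sketch of how scopedsessionclass might be wired up for the
# get_scoped_session record above (not the project's actual setup code; the
# in-memory sqlite URL is just a placeholder).
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session

engine = create_engine("sqlite://")
scopedsessionclass = scoped_session(sessionmaker(bind=engine))

s1 = get_scoped_session()
s2 = get_scoped_session()
assert s1 is s2  # scoped_session hands back the same Session within one thread
# After an os.fork() the child would typically call engine.dispose() and rebuild
# scopedsessionclass so it does not share sessions or connections with the parent.
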
>>> chequejaCaixa([[2,7,6,3,1,4,9,5,8],[8,5,4,9,6,2,7,1,3],[9,1,3,8,7,5,2,6,4],[4, 6, 8, 1, 2, 7, 3, 9, 5], [ 5, 9, 7, 4, 3, 8, 6, 2, 1], [1, 3, 2, 5, 9, 6, 4, 8, 7],[3, 2, 5, 7, 8, 9, 1, 4, 6], [6, 4, 1, 2, 5, 3, 8, 7, 9], [7, 8, 9, 6, 4, 1, 5, 3, 2]]) True >>> chequejaCaixa([[2,7,5,3,1,4,9,6,8],[8,5,4,9,6,2,7,1,3],[9,1,3,8,7,5,2,6,4],[4, 6, 8, 1, 2, 7, 3, 9, 5], [5, 9, 7, 4, 3, 8, 6, 2,1],[1,3,2,5,9,6,4,8,7],[3,2,5,7,8,9,1,4,6],[6,4,1,2,5,3,8,7,9],[7,8,9,6,4,1,5,3,2]]) False | def chequejaCaixa(m):
for caixaX in range(3):
for caixaY in range(3):
# For one box, collect the digits already seen
numsUtilitzats =""
for i in range (caixaX*3, caixaX*3 + 3):
for j in range(caixaY*3, caixaY*3 + 3):
if(m[i][j] < 1 or m[i][j] > 9):
return False
if(str(m[i][j]) in numsUtilitzats):
return False
numsUtilitzats += str(m[i][j])
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Tablero_completo(self,matriz:list) -> bool:\n\t\tfor i in range(6):\n\t\t\tfor j in range(6):\n\t\t\t\tif matriz[i][j]==0:\n\t\t\t\t\treturn False\n\t\treturn True",
"def entrada(self, linha, coluna):\n matriz = [[ 0 for x in range(coluna) ] for y in range (linha)]\n vetor_linha = [0, 1, 0, -1]\n vetor_coluna = [1, 0, -1, 0]\n l = 0\n c = 0\n direcao = 0\n for n in range(linha * coluna):\n matriz[l][c] = n + 1\n if ((vetor_coluna[direcao] == 1 and c == coluna - 1) or \n (vetor_linha[direcao] == 1 and l == linha - 1) or\n (vetor_coluna[direcao] == -1 and c == 0) or\n (matriz[l + vetor_linha[direcao]][c + vetor_coluna[direcao]] != 0)):\n direcao = (direcao + 1) % 4\n l += vetor_linha[direcao]\n c += vetor_coluna[direcao]\n return matriz",
"def colocar_palabra(matrix, palabra, esfila, pos, inicio) :\n for x in range(inicio, inicio+len(palabra) ) :\n if esfila:\n matrix[pos][x] = palabra[x-inicio]\n else:\n matrix[x][pos] = palabra[x-inicio]\n return matrix",
"def subsets(conjunto: list, matriz_resposta: list, capacidade: int) -> list:\n\n starts = [linha for linha in range(len(conjunto)+1) if matriz_resposta[linha][capacidade]]\n\n resultados = list()\n append = resultados.append\n for linha in starts:\n coluna = capacidade\n \n subconjunto = set()\n add = subconjunto.add\n\n while coluna >= 0 and linha >= 0:\n if (coluna - conjunto[linha-1]) > 0 and coluna == capacidade:\n coluna -= conjunto[linha-1]\n linha -= 1\n add(conjunto[linha])\n elif matriz_resposta[linha][coluna] == 1:\n linha -= 1\n else:\n coluna -= conjunto[linha]\n add(conjunto[linha])\n\n if sum(subconjunto) == capacidade and subconjunto not in resultados:\n append(subconjunto)\n\n return resultados",
"def subset_sum(conjunto: list, capacidade: int) -> list:\n max_coluna = capacidade + 1\n max_linha = len(conjunto) + 1\n\n matriz_resposta = [[0]*max_coluna for i in range(max_linha)]\n\n for linha in range(max_linha):\n matriz_resposta[linha][0] = 1\n\n for linha in range(1, max_linha):\n for coluna in range(1, max_coluna):\n if conjunto[linha-1] > coluna:\n resposta = matriz_resposta[linha-1][coluna]\n else:\n resposta = matriz_resposta[linha-1][coluna] or matriz_resposta[linha-1][coluna-conjunto[linha-1]]\n\n matriz_resposta[linha][coluna] = resposta\n\n return matriz_resposta",
"def espiral(matriz):\n result = [] # Lista resultado\n\n indice_central = indice_central = len(matriz) // 2\n \n # Coordenadas\n x = indice_central\n y = indice_central\n\n pasos = 1\n\n direcciones = [\"derecha\", \"abajo\", \"izquierda\", \"arriba\"]\n contador_direccion = 0\n\n\n\n # Agregamos el numero central ya a la lista antes de empezar\n result.append(matriz[x][y])\n print(result)\n iteraciones = 2 \n\n for k in range(len(matriz)*len(matriz)):\n\n for j in range(iteraciones):\n \n for i in range(pasos):\n direccion_actual = direcciones[contador_direccion]\n\n # Realizando el movimiento \n if(direccion_actual == direcciones[0]):\n # Hacia la derecha\n x = x\n y = y + 1 \n \n if (len(matriz)) == y: # Verificamos si es que ya nos salimos de la matriz\n # Por aqui es la unica manera de salir\n return result\n\n elif direccion_actual == direcciones[1]:\n # Hacia abajo\n x = x + 1\n y = y \n\n elif direccion_actual == direcciones[2]:\n # Hacia la izquierda\n x = x \n y = y - 1\n\n elif direccion_actual == direcciones[3]:\n # Hacia arriba\n x = x - 1\n y = y \n\n else:\n # Significa que direccion_actual tiene el valor de 4, y se ha salido de la lista, \n # por lo que debemos asumir que la direccion actual es 0, hacia la derecha\n contador_direccion = 0\n # Hacia la derecha\n x = x + 1\n y = y \n \n if (len(matriz)) == y: # Verificamos si es que ya nos salimos de la matriz\n # Por aqui es la unica manera de salir\n return result\n\n result.append(matriz[x][y])\n\n\n # Actualizamos la direccion actual para la siguiente iteracion\n contador_direccion = contador_direccion + 1\n if(contador_direccion == 4):\n contador_direccion = 0\n\n # La cantidad de pasos despues de dos iteraciones aumenta en uno\n pasos += 1\n\n return None",
"def procesar_palabras(matrix, nxn, palabras):\n salteadas = 0\n posiciones = []\n # direccion -- bool indica la direccion de la palabra\n # posicion \n # sentido_inverso -- bool indica el sentido de la palabra\n for i in range(len(palabras)):\n posicion_inicial = random.randint(0,nxn-1)\n sentido_inverso = bool(random.randint(0,1))\n if i == 0:\n direccion_inicial = False # Primer palabra siempre Vertical\n elif i == 1:\n direccion_inicial = True # Segunda palabra Horizontal\n sentido_inverso = True # Segunda palabra Invertida\n else:\n direccion_inicial = bool(random.randint(0,1))\n posicion = posicion_inicial\n direccion = direccion_inicial\n if sentido_inverso:\n palabras[i] = palabras[i][::-1]\n colocada = False\n while(not colocada):\n # Siempre Par\n valores_en_posicion = valores_posicion(matrix, nxn, direccion, posicion)\n for e in range(len(valores_en_posicion)/2):\n # margen random a la palabra\n if int(valores_en_posicion[e*2]) >= len(palabras[i]) :\n margen = int(valores_en_posicion[e*2]) - len(palabras[i])\n if margen > 0:\n inicio = random.randint(0,margen)\n matrix = colocar_palabra(matrix, palabras[i], direccion, posicion, margen)\n if direccion :\n fila_inicio = posicion\n columna_inicio = margen\n fila_final = posicion\n columna_final = margen + len(palabras[i])-1\n else:\n columna_inicio = posicion\n fila_inicio = margen\n columna_final = posicion\n fila_final = margen + len(palabras[i])-1\n if sentido_inverso :\n aux = fila_final\n fila_final = fila_inicio\n fila_inicio = aux\n aux = columna_final\n columna_final = columna_inicio\n columna_inicio = aux \n # Alternativa \"legible\" a las posiciones\n # posiciones\n posiciones.append(str(columna_inicio)+str(fila_inicio)+str(columna_final)+str(fila_final))\n colocada = True\n print \"colocada\"\n break\n if not colocada:\n print \"no colocada\"\n # Si en esa posicion no entra, probar en la siguiente\n if posicion < nxn-1: posicion += 1\n else: posicion = 0\n # prueba todas las posiciones, cambiar direccion y probar de nuevo\n if posicion == posicion_inicial :\n direccion = not direccion\n # Si cambiar la direccion y probar en todas las posiciones tampoco sirve, entonces saltear palabra\n if direccion == direccion_inicial:\n salteadas+=1\n # Si, \"Break\" porque el while esta dentro del For.\n break\n if salteadas != 0 : return matrix,posiciones,salteadas\n else: return matrix,posiciones,0",
"def tri_si_rencontre(self, joueurs_tries, liste_rencontres, nb_joueurs):\n # We recover the possibilities\n for x in joueurs_tries:\n liste_dict = []\n for y in joueurs_tries:\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n continue\n else:\n liste_dict.append(y)\n self.dict_possiblity[x] = liste_dict\n copy_joueurs = list(joueurs_tries)\n liste_finale = []\n nb_tour = 0\n error = False\n while joueurs_tries:\n x = joueurs_tries[0]\n for y in joueurs_tries:\n if nb_tour > nb_joueurs**2:\n print(\"Il y a une erreur dans l'algorithme.\")\n error = True\n break\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n nb_tour += 1\n continue\n else:\n i = 0\n # we are looking for a unique possibility\n for key in list(self.dict_possiblity):\n if len(self.dict_possiblity[key]) == 1:\n valeur = self.dict_possiblity[key][0]\n liste_finale.append((key, valeur))\n liste_rencontres.append((key, valeur))\n joueurs_tries.remove(key)\n joueurs_tries.remove(valeur)\n self.sup_dicti(valeur, key)\n i += 1\n break\n if i > 0:\n break\n # we remove both of the possibilities\n self.sup_dicti(x, y)\n liste_finale.append((x, y))\n liste_rencontres.append((x, y))\n joueurs_tries.remove(y)\n joueurs_tries.remove(x)\n break\n if error:\n liste_finale = Vue().demander_binomes(copy_joueurs,\n nb_joueurs)\n return liste_finale\n return liste_finale",
"def eleven():\r\n \r\n matrix = [[8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 5, 77, 91, 8],\r\n [49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],\r\n [81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],\r\n [52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],\r\n [22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],\r\n [24, 47, 32, 60, 99, 03, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],\r\n [32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],\r\n [67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],\r\n [24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],\r\n [21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],\r\n [78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],\r\n [16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],\r\n [86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],\r\n [19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],\r\n [4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],\r\n [88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],\r\n [4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],\r\n [20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],\r\n [20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],\r\n [1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48]]\r\n \r\n greatest = 0\r\n product = 1\r\n \r\n # Find highest product of four left to right numbers\r\n for i in range(20):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product *= matrix[i][j + k]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n # Find highest product of four up and down numbers\r\n for i in range(20):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product += matrix[j + k][i]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n # Find highest product of four diagonal up/down numbers\r\n for i in range(17):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product *= matrix[i + k][j + k]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n # Find highest product of four diagonal down/up numbers\r\n for i in range(3, 20):\r\n j = 0\r\n while j < 17:\r\n for k in range(4):\r\n product *= matrix[i - k][j + k]\r\n if product > greatest:\r\n greatest = product\r\n product = 1\r\n j += 1\r\n \r\n \r\n #for i in range(17):\r\n # for j in range(4):\r\n # print matrix[i + j][0],\r\n # print\r\n \r\n return greatest",
"def colocar_especial(tablero_juego, filas, columnas, especiales_nivel, datos_de_especiales, obstaculos, posicion_fruta, posicion_serpiente):\n color_normal = '\\033[0m'\n color_azul = '\\033[34m'\n especial_colocado = choice(especiales_nivel)\n especial = datos_de_especiales[especial_colocado]\n while True:\n posicion_especial = [randint(0, filas-1), randint(0, columnas-1)]\n if not any((esta_contenido_o_igual(posicion_especial, obstaculos), \n esta_contenido_o_igual(posicion_especial, posicion_serpiente), \n esta_contenido_o_igual(posicion_especial, posicion_fruta))): break\n tablero_juego[posicion_especial[0]][posicion_especial[1]] = color_azul + especial_colocado + color_normal\n return posicion_especial, especial_colocado",
"def gerar_colunas(matriz):\n colunas_matriz = []\n for d in range(qtde_problemas):\n coluna_matriz = []\n for c in range(len(matriz)):\n coluna_matriz.append(matriz[c][d])\n colunas_matriz.append(coluna_matriz.copy())\n return colunas_matriz",
"def traspo(*args):\r\n filas,columnas = len(args),len(args[0])\r\n matriz_r = []\r\n for i in range(0,filas):\r\n lista_aux = []\r\n for j in range(0,columnas):\r\n lista_aux.append(args[j][i])\r\n matriz_r.append(lista_aux)\r\n return matriz_r",
"def casas_com_wumpus(self):\n casas_com_wumpus = []\n for x in range(self.tamanho):\n for y in range(self.tamanho):\n if self.tabuleiro[x][y] == WUMPUS:\n casas_com_wumpus.append((x, y))\n return casas_com_wumpus",
"def procesar_coordenadas(entradas, nxn, posiciones, matrix):\n columna_inicio = entradas[0][0]\n fila_inicio = int(entradas[0][1:])-1\n\n columna_final = entradas[1][0]\n fila_final = int(entradas[1][1:])-1\n if fila_final >= nxn or fila_inicio >= nxn :\n return False, obtener_mensaje(\"rango_fila\"), posiciones, matrix\n # 65 es el valor decimal de la letra A en la tabla ASCII, la cual coincide con el origen de coordenadas de nuestro tablero.\n if ord(columna_inicio.upper())-65 >= nxn or ord(columna_final.upper())-65 >= nxn :\n return False, obtener_mensaje(\"rango_columna\"), posiciones, matrix\n else:\n columna_inicio = ord(columna_inicio.upper())-65\n columna_final = ord(columna_final.upper())-65\n if columna_inicio != columna_final and fila_inicio != fila_final :\n return False, obtener_mensaje(\"fila_o_columa_igual\"), posiciones, matrix\n if columna_inicio == columna_final :\n vertical = True\n else:\n vertical = False\n # Se arma una cadena con las coordenadas de la misma forma como fue agregada anteriormente a la lista \"posiciones\"\n cadena_de_posicion = str(columna_inicio)+str(fila_inicio)+str(columna_final)+str(fila_final)\n #============== Comprobar si existe la palabra. Removerla en caso afirmativo\n if not cadena_de_posicion in posiciones :\n return False, obtener_mensaje(\"error_coordenadas\"), posiciones, matrix\n posiciones.remove(cadena_de_posicion)\n #============== Mostrar la palabra en mayuscula\n if vertical :\n for fila in range(min(fila_inicio, fila_final),max(fila_inicio, fila_final)+1) : \n matrix[fila][columna_final] = str(matrix[fila][columna_final]).upper()\n else:\n for columna in range(min(columna_inicio, columna_final),max(columna_inicio, columna_final)+1) :\n matrix[fila_inicio][columna] = str(matrix[fila_inicio][columna]).upper()\n return True, \"\", posiciones, matrix",
"def clear_board(key, value, checker):\n # one_left.clear() # clears list every time it checks\n row, col = find_box(key) # checks for box\n for x in rows[row]:\n for y in cols[col]:\n # print(checker[x + str(y)]) # TODO delete later!\n # print(value)\n # print(\"hi\")\n if str(x + str(y)) in checker: # if this element is 0 ###\n\n if value in checker[x + str(y)] and str(x + str(y)) != key:\n checker[x + str(y)].remove(value) # remove value from list\n # if len(checker[x + str(y)]) == 1: # if one element left\n # one_left.append(x + str(y)) # only relevant for preprocessing\n if len(checker[x + str(y)]) == 0: # if this is invalid board\n return False\n for x in ROW: # checks for column\n if str(x + str(key[1])) in checker:\n if str(x + str(key[1])) != key and value in checker[x + str(key[1])]: # board[x + str(key[1])] == value:\n # if len(checker[x + str(key[1])]) > 0:\n checker[x + str(key[1])].remove(value)\n # if len(checker[x + str(key[1])]) == 1: # if one element left\n # one_left.append(x + str(key[1]))\n if len(checker[x + str(key[1])]) == 0: # if this is invalid board\n return False\n for i in COL: # checks for row\n if str(key[0] + str(i)) in checker:\n if str(key[0] + str(i)) != key:\n if value in checker[key[0] + str(i)]: # board[key[0] + str(i)] == value:\n # if len(checker[key[0] + str(i)]) > 0:\n # print('value:') ##\n # print(value) ##\n # print('array:') ##\n # print(checker[key[0] + str(i)])\n checker[key[0] + str(i)].remove(value)\n # if len(checker[key[0] + str(i)]) == 1: # if one element left\n # one_left.append(key[0] + str(i))\n if len(checker[key[0] + str(i)]) == 0: # if this is invalid board\n return False\n # return False\n # adjust(checker) #####\n return True # then the new heuristics work!!",
"def getCaseDisp():\r\n liste = []\r\n for i in range(3):\r\n for j in range(3):\r\n if Grille[i][j] == 0:\r\n liste.append([i,j])\r\n return liste",
"def getCambiosQafectanCaja(self, fechaInicio, fechaFin, usuarioColaborador=\"\"):\n\tif usuarioColaborador == \"\" and fechaInicio == \"\" and fechaFin == \"\":\n\t return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\"\"\")\n elif usuarioColaborador == \"\":\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\"\"\" %(fechaInicio,fechaFin))\n else:\n return self.conexion.ejecutarSQL(\"\"\"select c.id, c.fecha, c.hora, c.codigo_Producto_entra, c.codigo_Producto_sale, c.id_Venta, c.excedente, c.usuario_Colaborador\n from cambios c, ventas v\n where c.id_Venta = v.id\n and c.fecha != v.fecha\n and c.fecha between '%s' and '%s'\n and c.usuario_Colaborador = '%s'\"\"\" %(fechaInicio,fechaFin,usuarioColaborador))",
"def procesar_juego(matrix,nxn,n_palabras,salteadas,posiciones):\n palabras_restantes = n_palabras\n msg_to_show = \"\"\n\n while palabras_restantes > 0:\n clear_window()\n show_title(\"Encuentre las palabras\")\n # Si por parametro se indica que existen palabras salteadas, mostramos un mensaje\n if salteadas != None:\n show_msg(\"Palabras restantes: %d Salteadas: %d \\n\"%(palabras_restantes, salteadas))\n else:\n show_msg(\"Palabras restantes: %d \\n\"%palabras_restantes)\n mostrar_tablero(matrix, nxn)\n # Mostramos el mensaje y le agregamos una linea nueva\n if msg_to_show != \"\":\n show_msg(msg_to_show+\"\\n\")\n msg_to_show = \"\"\n coordenadas, msg_to_show = pedir_coordenadas()\n if not coordenadas : continue\n encontrada, msg_to_show, posiciones, matrix = procesar_coordenadas(coordenadas, nxn, posiciones, matrix)\n if not encontrada : continue\n else:\n palabras_restantes -= 1\n msg_to_show = \"Muy Bien! Encontraste una palabra!\"\n mostrar_fin_juego(n_palabras)\n return True",
"def escolhe_iin_comp(abv): \n \n # Vai ser usado o tuplo que contem todas as informacoes sobre os diferentes tipos de cartao definido nas linhas de codigo iniciais.\n # Sao acedidas as informacoes no indice 1 (Abreviatura), 2 (Digitos iniciais IIN) e 3 (Numero de Digitos). \n \n # Iremos percorrer o tuplo com todas as informacoes sobre os tipos de cartao. Quando se chegar a informacao correspondente a entidade emissora introduzida, escolhemos aleatoriamente os digitos iniciais e o comprimento do cartao.\n \n \n for e in t_cartoes:\n \n if e[1] == abv:\n dig_in = e[2][int(random() * len(e[2]))]\n comp = int(e[3][int(random() * len(e[3]))]) \n \n return (dig_in,comp)",
"def testPerfilCasoInterseccionesVarias(self):\n #se prueba el mismo conjunto introducido en distinto orden\n if self.TESTALL:\n pe1 = [1, 7, 12, 0]\n pe2 = [3, 9, 5, 0]\n pe3 = [8, 10, 17, 0]\n combo1 = pe1 + pe2 + pe3\n combo2 = pe1 + pe3 + pe2\n combo3 = pe2 + pe1 + pe3\n combo4 = pe3 + pe2 + pe1\n combo5 = pe2 + pe3 + pe1\n combo6 = pe3 + pe2 + pe1\n listaCombos = []\n listaCombos.append(combo1)\n listaCombos.append(combo2)\n listaCombos.append(combo3)\n listaCombos.append(combo4)\n listaCombos.append(combo5)\n listaCombos.append(combo6)\n \n resultadoEsperado = [1, 7, 3, 9, 5, 7, 8, 10, 17]\n perfil = Perfil.Perfil()\n for perfilOriginal in listaCombos:\n resultado = perfil.calcularPerfil(perfilOriginal, 0)\n self.assertEqual(resultadoEsperado, resultado)",
"def Permite_salto(self,coordenada:list,color:int) -> bool:\n\n\t\t#coordenadas\n\t\tfila = coordenada[0]\n\t\tcolumna = coordenada[1]\n\n\t\t#lista de direciones en las que se podia saltar\n\t\tkey=[False]*8\n\n\t\t#La coordenada en donde se desea posicionar la ficha debe ser adyacente a alguna ficha ya puesta\n\t\tif self.Es_adyacente(coordenada):\n\n\t\t\t#Por cada direccion se analiza si permite el salto\n\n\t\t\t#abajo\n\t\t\taux = columna+1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[fila][aux]==2:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux+i+1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux+i+1],\"derecha\",1)\n\t\t\t\t\t\t\t\tkey[0]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[fila][aux]==1:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux+i+1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux+i+1],\"derecha\",2)\n\t\t\t\t\t\t\t\tkey[0]=True\n\t\t\t#arriba\n\t\t\taux = columna-1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[fila][aux]==2:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux-i-1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux-i-1],\"izquierda\",1)\n\t\t\t\t\t\t\t\tkey[1]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[fila][aux]==1:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux-i-1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux-i-1],\"izquierda\",2)\n\t\t\t\t\t\t\t\tkey[1]=True\n\t\t\t#derecha\n\t\t\taux = fila+1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[aux][columna]==2:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][columna]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,columna],\"abajo\",1)\n\t\t\t\t\t\t\t\tkey[2]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[aux][columna]==1:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][columna]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,columna],\"abajo\",2)\n\t\t\t\t\t\t\t\tkey[2]=True\n\t\t\t#izquierda\n\t\t\taux = fila-1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[aux][columna]==2:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][columna]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,columna],\"arriba\",1)\n\t\t\t\t\t\t\t\tkey[3]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[aux][columna]==1:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][columna]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,columna],\"arriba\",2)\n\t\t\t\t\t\t\t\tkey[3]=True\n\t\t\t#abajo-izquerda\n\t\t\taux = fila-1\n\t\t\taux2 = columna+1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [aux,5-aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2+i+1]==1:\n\t\t\t\t\t\t\t\tif not 
self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2+i+1],\"arriba-derecha\",1)\n\t\t\t\t\t\t\t\tkey[4]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2+i+1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2+i+1],\"arriba-derecha\",2)\n\t\t\t\t\t\t\t\tkey[4]=True\n\n\t\t\t#arriba-izquierda\n\t\t\taux = fila-1\n\t\t\taux2 = columna-1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [aux,aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2-i-1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2-i-1],\"arriba-izquierda\",1)\n\t\t\t\t\t\t\t\tkey[5]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2-i-1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2-i-1],\"arriba-izquierda\",2)\n\t\t\t\t\t\t\t\tkey[5]=True\n\t\t\t#abajo-derecha\n\t\t\taux = fila+1\n\t\t\taux2 = columna+1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [5-aux,5-aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2+i+1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2+i+1],\"abajo-derecha\",1)\n\t\t\t\t\t\t\t\tkey[6]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2+i+1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2+i+1],\"abajo-derecha\",2)\n\t\t\t\t\t\t\t\tkey[6]=True\n\t\t\t#arriba-derecha\n\t\t\taux = fila+1\n\t\t\taux2 = columna-1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [5-aux,aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2-i-1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2-i-1],\"abajo-izquierd\",1)\n\t\t\t\t\t\t\t\tkey[7]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2-i-1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2-i-1],\"abajo-izquierda\",2)\n\t\t\t\t\t\t\t\tkey[7]=True\n\t\t\t\n\t\t\tif True in key:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn False",
"def voisins_couloir():\n global tableau\n voisins_milieu = 0\n for v in range(3, 4):\n for w in range(NB_LINE):\n if tableau[v][w] != 0 and [v, w] != [i, j]:\n voisins_milieu += 1\n return voisins_milieu",
"def test_obtener_lista_ciudades_transformadas(self):\n lista = [{\"id\": 707860, \"name\": \"Hurzuf\", \"country\": \"UA\", \"coord\": {\"lon\": 34.283333, \"lat\": 44.549999}}, {\"id\": 519188, \"name\": \"Novinki\", \"country\": \"RU\", \"coord\": {\"lon\": 37.666668, \"lat\": 55.683334}}]\n lista_transformada = CIUDADES_CONTROLLER.obtenerListaCiudades(lista)\n lista_esperada = [{\"id\": 707860, \"name\": \"Hurzuf\", \"country\": \"UA\"}, {\"id\": 519188, \"name\": \"Novinki\", \"country\": \"RU\"}]\n self.assertEqual(len(lista_transformada), len(lista))\n self.assertEqual(lista_esperada, lista_transformada)",
"def solveSudoku(self, board: List[List[str]]) -> None:\n row, col, part = [set() for _ in range(9)], [set() for _ in range(9)], [set() for _ in range(9)]\n blank = []\n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n row[i].add(board[i][j])\n col[j].add(board[i][j])\n part[i//3 * 3 + j//3].add(board[i][j])\n else:\n blank.append([i, j])\n def recursion(row, col, part, blank, board, count, n):\n if count == n:\n return True\n else:\n x, y = blank.pop()\n for c in range(1, 10):\n c = str(c)\n if c not in row[x] and c not in col[y] and c not in part[x//3 * 3 + y//3]:\n row[x].add(c)\n col[y].add(c)\n part[x//3 * 3 + y//3].add(c)\n board[x][y] = c\n count += 1\n check = recursion(row, col, part, blank, board, count, n)\n if check:\n return check\n row[x].remove(c)\n col[y].remove(c)\n part[x//3 * 3 + y//3].remove(c)\n board[x][y] = \".\"\n count -= 1\n blank.append([x,y])\n return False\n count, n = 0, len(blank)\n recursion(row, col, part, blank, board, count, n)",
"def add_conec_listoflists(self, conec):\n self.num = len(conec) # numero de elementos en conec\n self.ne = [] # numero de elem1 por cada elem0]\n self.je = [] # aca pongo la conectividad en un array chorizo\n self.len_je = 0\n for elem0 in conec:\n num_elem1 = 0 # numero de elem1 en elem0\n for item1 in elem0:\n num_elem1 += 1 # sumo un elem1 conectado a elem0\n self.je.append(item1) # lo agrego a la conectividad\n self.len_je += 1\n self.ne.append(num_elem1) # agrego el numero de elem1 de elem0 a la lista",
"def test_cmatrix_list(self):\n\n test_dtraj = [np.array([0, 1, 1, 0, 0, 0, 1, 1, 1, 1]), \\\n np.array([0,1,1,1,0])]\n\n cmatrix_compare = np.array([[2., 3.], [2., 6.]])\n cmatrix_computed = cmatrix(test_dtraj)\n self.assertTrue(np.allclose(cmatrix_compare, cmatrix_computed))",
"def arredondar(self):\n for y in range(self.altura):\n for x in range(self.largura):\n for i in range(3):\n self.lista[x][y][i] = int(self.lista[x][y][i])",
"def test_get_cheap_hash(get_all_structures):\n comp_matrix = np.zeros((len(get_all_structures), len(get_all_structures)))\n for i, structure_a in enumerate(get_all_structures):\n for j, structure_b in enumerate(get_all_structures):\n if i < j:\n hash_a = get_cheap_hash(structure_a)\n hash_b = get_cheap_hash(structure_b)\n if hash_a == hash_b:\n comp_matrix[i][j] = 1\n else:\n comp_matrix[i][j] = 0\n assert sum(comp_matrix) == sum(np.diag(comp_matrix))",
"def checkMatrix(i,j):\n # print(i,j)\n counter = Counter([grid[di][dj] for di in range(i,i+3) for dj in range(j,j+3)])\n for k in range(1,10):\n if counter[k] != 1:\n return False\n\n rows_sum = [sum(grid[k][j:j+3]) for k in range(i,i+3)]\n # print(rows_sum)\n if not all(m == 15 for m in rows_sum):\n return False\n cols_sum = [sum(grid[q][k] for q in range(i,i+3)) for k in range(j,j+3)]\n # print(cols_sum)\n if not all(m == 15 for m in cols_sum):\n return False\n dgl_sum = sum(grid[i+k][j+k] for k in range(3))\n anti_dgl_sum = sum(grid[i+k][j+2-k] for k in range(3))\n # print(dgl_sum, anti_dgl_sum)\n if dgl_sum != 15 or anti_dgl_sum != 15:\n return False\n return True",
"def solveSudoku(self, board: List[List[str]]) -> None:\n\n vertical = [set() for i in range(len(board))]\n horizontal = [set() for i in range(len(board))]\n little = [set() for i in range(9)]\n\n def get_little_index(x, y):\n if x < 3:\n little_index = y // 3\n elif x < 6:\n little_index = 3 + y // 3\n else:\n little_index = 6 + y // 3\n return little_index\n\n def add(x, y, i):\n board[x][y] = i\n\n little_index = get_little_index(x, y)\n vertical[y].add(i)\n horizontal[x].add(i)\n little[little_index].add(i)\n\n def search(x, y):\n print(x, y)\n if x == 9:\n return True\n\n little_index = get_little_index(x, y)\n if board[x][y] == '.':\n for i in range(1, 9):\n ii = str(i)\n if ii not in vertical[y] and ii not in horizontal[x] and ii not in little[little_index]:\n add(x, y, ii)\n\n if y + 1 < 9:\n find = search(x, y + 1)\n else:\n find = search(x + 1, 0)\n if find:\n return find\n\n vertical[y].remove(ii)\n horizontal[x].remove(ii)\n little[little_index].remove(ii)\n board[x][y] = '.'\n return False\n else:\n if y + 1 < 9:\n return search(x, y + 1)\n else:\n return search(x + 1, 0)\n\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n add(i, j, board[i][j])\n\n search(0, 0)"
]
| [
"0.5899558",
"0.5898364",
"0.584221",
"0.56435746",
"0.54986304",
"0.5476072",
"0.5394028",
"0.5356176",
"0.5284666",
"0.51707196",
"0.5153619",
"0.51265126",
"0.51237506",
"0.5084624",
"0.50804573",
"0.5059511",
"0.5059344",
"0.5040672",
"0.50087786",
"0.50064075",
"0.4979786",
"0.4976319",
"0.4968946",
"0.4957547",
"0.4946958",
"0.4942285",
"0.49410915",
"0.49309918",
"0.4928882",
"0.4911458"
]
| 0.666221 | 0 |
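The positive document above validates every 3x3 box of a 9x9 grid by rejecting values outside 1-9 and repeated digits. A minimal standalone sketch of that check with English names (the names are chosen here for illustration and do not appear in the dataset):

def box_is_valid(grid, box_row, box_col):
    # True when the 3x3 box at (box_row, box_col) contains 1-9 with no repeats.
    seen = set()
    for i in range(box_row * 3, box_row * 3 + 3):
        for j in range(box_col * 3, box_col * 3 + 3):
            value = grid[i][j]
            if value < 1 or value > 9 or value in seen:
                return False
            seen.add(value)
    return True

def all_boxes_valid(grid):
    # Equivalent to the chequejaCaixa document: every one of the nine boxes must pass.
    return all(box_is_valid(grid, r, c) for r in range(3) for c in range(3))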
The function determines whether the dealer has a card that would qualify for discard or not | def dealer_matching(self):
if len([card for card in self.dealer_hand if card[1] == '8']) > 0:
self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0]
self.dealer_hand.remove(self.discard_pile)
dealer_suits = [card[0] for card in self.dealer_hand]
self.new_suit = max(set(dealer_suits), key=dealer_suits.count)
print("\nNew suit is :", self.new_suit)
return 1
if self.new_suit != '':
matching = []
for card in self.dealer_hand:
if card[0] == self.new_suit:
matching.append(card)
if len(matching) > 0:
matching_values = list(map(self.card_value, matching))
self.discard_pile = matching[matching_values.index(max(matching_values))]
self.dealer_hand.remove(self.discard_pile)
self.new_suit = ''
return 1
else:
return 0
if self.new_suit == '':
matching = []
for card in self.dealer_hand:
if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:
matching.append(card)
if len(matching) > 0:
matching_values = list(map(self.card_value, matching))
self.discard_pile = matching[matching_values.index(max(matching_values))]
self.dealer_hand.remove(self.discard_pile)
return 1
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False",
"def still_in_hand(self):\n return len(self.hand.cards)!=0",
"def is_miss_deal(hand: list, mighty: Card) -> bool:\n point_card_count = 0\n for card in hand:\n if card.is_pointcard() and card != mighty:\n point_card_count += 1\n\n if point_card_count <= 1:\n return True\n else:\n return False",
"def discarded(self) -> bool:\n return (\n len(self.cards) == 13 - self.game.board.purple.space - self.discard_amount\n )",
"def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. '))\n return chosen_to_discard",
"def player_discard(self, inpt):\n \n if inpt.isdigit() == False:\n return 0\n if int(inpt) > len(self.player_hand):\n print(\"\\nNumber of card entered is greater than number of cards\")\n print(\"Please try again \\n\")\n return 0\n if self.player_hand[int(inpt)-1][1] == '8':\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n while self.new_suit not in ['h','d','s','c']:\n self.new_suit = input(\"Please enter new suit: h, d, s, c\\n\")\n print(\"\\nNew suit is: \", self.new_suit)\n return 1\n if self.new_suit != '':\n if self.player_hand[int(inpt)-1][0] == self.new_suit:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n return 1\n else:\n print(\"\\nYou need to match new suit\")\n print(\"Please try again\\n\")\n return 0\n if self.new_suit == '':\n if self.player_hand[int(inpt)-1][0] == self.discard_pile[0] or \\\n self.player_hand[int(inpt)-1][1] == self.discard_pile[1]:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n return 1\n else:\n print(\"\\nYou need to match discard pile card suit or rank\")\n print(\"Please try again\\n\")\n return 0",
"def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()",
"def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21",
"def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])",
"def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True",
"def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")",
"def has_cards(self):\n return self.hand.len() > 0",
"def can_afford_card(self,\n card: Card) -> bool:\n price_after_discount = card.price % self.discount()\n missing_gems = 0\n for gem_color in GemColor:\n if gem_color != GemColor.GOLD:\n missing_gems += max(price_after_discount.value(gem_color) - self.gems_possessed.value(gem_color),0)\n return self.gems_possessed.value(GemColor.GOLD) >= missing_gems",
"def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n return self.blackjack_player_win()\n lost_insurance_bet(self.side_bet)\n return False",
"def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False",
"def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)",
"def cardDead(self, card):\n return card.getValue() <= self.field[Suit.toInt(card.getSuit()) - 1]",
"def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)",
"def cardPlayable(self, card):\n return self.field[Suit.toInt(card.getSuit()) - 1] == card.getValue() - 1",
"def discard(self, card: Card) -> None:\n\n success = False\n for i in range(len(self._hand)):\n if self._hand[i] == card:\n self._hand[i] = Card.EMPTY\n success = True\n break\n\n if not success:\n raise ValueError(f\"Player hand does not contain {card.name}\")",
"def prompt_discard(self, num_discards: int, state: 'State'):\n # TODO: Refactor to allow for flexible discarding (see Cellar). Meybe a force discard and a prompt discard?\n while self.hand and num_discards > 0:\n sorted_hand = sorted(list(self.hand), key=card_sort)\n card_name = self.get_input(\n f'Discard {num_discards} cards'\n f'Hand: {sorted_hand}',\n sorted_hand,\n state\n )\n # If the prompted card is in hand, discard it\n card = next((card for card in self.hand if card.name == card_name), None)\n if card:\n self.hand[card] -= 1\n self.hand += Counter() # Remove 0 and negative counts\n self.discard_pile.append(card)\n num_discards -= 1\n print(f'Discarded {card.name}')\n else:\n print(f'{card.name} is not in hand')",
"def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])",
"def is_royal_flush(hand):\n\n # same suit\n suite = hand[0][1]\n count = {c:0 for c in cards.keys()}\n for c in hand:\n if suite != c[1]:\n return False\n count[c[0]] += 1\n # all in same suit\n for c in 'T J Q K A'.split():\n if count[c] != 1:\n return False\n return True",
"def is_soft_hand(self):\n is_soft = False\n for i in self.cards:\n if i.value == 'ACE':\n is_soft = True\n\n return is_soft",
"def does_player_have_card(self, player, card):\n return card in self.hands[player]",
"def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)",
"def hand_empty(self):\n return len(self.cards) == 0",
"def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))",
"def deal_card(self):\n return self._deal(1)[0]",
"def test_discard(self):\r\n deck_size = 3\r\n d = Deck(deck_size)\r\n for _ in range(deck_size):\r\n d.draw()\r\n d.discard([1, 3])\r\n drawn = d.draw(2)\r\n self.assertEqual(len(drawn), 2)\r\n self.assertIn(1, drawn)\r\n self.assertIn(3, drawn)"
]
| [
"0.8037587",
"0.6778592",
"0.6672884",
"0.6595173",
"0.6592071",
"0.6523255",
"0.6504276",
"0.6475008",
"0.6374864",
"0.636109",
"0.6359076",
"0.6343342",
"0.63184786",
"0.62778324",
"0.6257041",
"0.62546676",
"0.62442625",
"0.6229118",
"0.6229065",
"0.62240404",
"0.6197725",
"0.6178569",
"0.6170008",
"0.61108565",
"0.60824317",
"0.6047245",
"0.6045678",
"0.6022187",
"0.601746",
"0.6015189"
]
| 0.68221676 | 1 |
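The dealer_matching document above encodes a crazy-eights style rule: discard an eight if one is held, otherwise discard the highest-value card matching the top card's suit or rank. A self-contained sketch of that selection rule (the (suit, rank) card tuples and the card_value callable are assumptions mirroring the snippet, not dataset fields):

def pick_discard(hand, top_card, card_value):
    # Cards are (suit, rank) tuples, e.g. ('h', 'K'); eights are wild.
    eights = [card for card in hand if card[1] == '8']
    if eights:
        return eights[0]
    matching = [card for card in hand
                if card[0] == top_card[0] or card[1] == top_card[1]]
    if not matching:
        return None  # no legal discard; the caller would draw instead
    return max(matching, key=card_value)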
Test the updating of calendar-free-busy-set xattrs on inboxes | def test_freeBusyUpgrade(self):
self.setUpInitialStates()
directory = self.directory
#
# Verify these values require no updating:
#
# Uncompressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/BB05932F-DCE7-4195-9ED4-0896EAFF3B0B/calendar</href>\r\n</calendar-free-busy-set>\r\n"
self.assertEquals((yield updateFreeBusySet(value, directory)), None)
# Zlib compressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/BB05932F-DCE7-4195-9ED4-0896EAFF3B0B/calendar</href>\r\n</calendar-free-busy-set>\r\n"
value = zlib.compress(value)
self.assertEquals((yield updateFreeBusySet(value, directory)), None)
# Pickled XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/BB05932F-DCE7-4195-9ED4-0896EAFF3B0B/calendar</href>\r\n</calendar-free-busy-set>\r\n"
doc = WebDAVDocument.fromString(value)
value = cPickle.dumps(doc.root_element)
self.assertEquals((yield updateFreeBusySet(value, directory)), None)
#
# Verify these values do require updating:
#
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>"
# Uncompressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
# Zlib compressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"
value = zlib.compress(value)
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
# Pickled XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"
doc = WebDAVDocument.fromString(value)
value = cPickle.dumps(doc.root_element)
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
#
# Shortname not in directory, return empty string
#
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'/>"
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/nonexistent/calendar</href>\r\n</calendar-free-busy-set>\r\n"
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_calendar_query_partial_freebusy(self):\n raise SkipTest(\"test unimplemented\")",
"def test_update_custom_button(self):\n pass",
"def test_calendarsUpgradeWithUIDs(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Plain XML\n freeBusyAttr: \"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\",\n },\n },\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"@xattrs\":\n {\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def testDirtyRefresh(self):\n \n pass",
"def test_checkbox_attr_change(self):\n custom_attribute_values = [{\n \"custom_attribute_id\": self.cad3.id,\n \"attribute_value\": \"1\",\n }]\n response = self.api.put(self.assessment, {\n \"custom_attribute_values\": custom_attribute_values\n })\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"[email protected]\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"].keys(), [\"CA3\"])",
"def test_update_business(self):\n self.business_item_class.businesses_list = [{\"owner\": \"chairman\", \"business_name\":\"Maendeleo\", \"category\":\"Backaend\", \"location\":\"myhomecity\"},\n {\"owner\": \"chairmanwe\", \"business_name\":\"NshMaendeleo\", \"category\":\"Backaend\", \"location\":\"myhomecity\"}]\n msg = self.business_item_class.update_business(\"Christmass\", \"Maendeleo\", \"chairman\")\n self.assertEqual(msg, [{\"owner\": \"chairman\", \"business_name\":\"Christmass\", \"category\":\"Backaend\", \"location\":\"myhomecity\"}])",
"def test_meeting_poll_update(self):\n pass",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_calendarsUpgradeWithInboxItems(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"oldinboxitem\": {\n \"@contents\": \"\",\n \"@timestamp\": 1, # really old file\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"inboxitems.txt\":\n {\n \"@contents\": None, # ignore contents, the paths inside are random test directory paths\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def dummy_update( self ):\r\n pass",
"def test_addMgmtObjAttribute(self) -> None:\n\t\tdct = \t{ 'm2m:bat' : {\n\t\t\t\t\t'aa' : [ 'btl', 'bts']\n\t\t\t\t}}\n\t\tr, rsc = UPDATE(batURL, ORIGINATOR, dct)\n\t\tself.assertEqual(rsc, RC.updated)\n\t\tself.assertEqual(findXPath(r, 'm2m:bat/btl'), 42)\n\t\tself.assertEqual(findXPath(r, 'm2m:bat/bts'), 2)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:bat/aa'))\n\t\tself.assertEqual(len(findXPath(r, 'm2m:bat/aa')), 2)\n\t\tself.assertIn('btl', findXPath(r, 'm2m:bat/aa'))\n\t\tself.assertIn('bts', findXPath(r, 'm2m:bat/aa'))\n\n\t\tr, rsc = RETRIEVE(f'{REMOTEURL}/~{TestRemote_Annc.remoteBatRI}', ORIGINATOR)\n\t\tself.assertEqual(rsc, RC.OK)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:batA/btl'))\n\t\tself.assertEqual(findXPath(r, 'm2m:batA/btl'), 42)\n\t\tself.assertIsNotNone(findXPath(r, 'm2m:batA/bts'))\n\t\tself.assertEqual(findXPath(r, 'm2m:batA/bts'), 2)",
"def test_calendarsUpgradeWithNoChange(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def _set_attributes(self):",
"def test_meeting_update(self):\n pass",
"def test_update_ban(self):\n pass",
"def slot_owns_changed(self, orderbook, _dummy):\r\n pass",
"def test_one_object_multi_bndbox(self):\n\n text_num = '123'\n obj = self.root.find('object')\n bndbox = obj.find('bndbox')\n bndbox.find('xmin').text = text_num\n obj.append(bndbox)\n _, boxes = self._test_helper()\n self.assertEqual(len(boxes), 2)\n self.assertNotEqual(boxes[0].xmin, int(text_num))",
"def test_editEvent(self):\n event_a = Event.objects.create(title=\"Christmas meal\",\n start=datetime.strptime(\"2020-12-03 12:00\", \"%Y-%m-%d %H:%M\"),\n end=datetime.strptime(\"2020-12-03 16:00\", \"%Y-%m-%d %H:%M\"),\n event_owner=self.person_a,\n duration=timedelta(hours=4),\n recurrence_interval=0, description=\"happy christmas party\", website_publish=True)\n event_a.invites.add(self.comms_grp)\n event_a.save()\n client = APIClient()\n update_data = {\"event_owner\": self.person_a.pk, \"title\": \"Christmas meal\", \"start\":\n datetime.strptime(\"2020-12-07 12:00\", \"%Y-%m-%d %H:%M\"),\n \"end\": datetime.strptime(\"2020-12-07 16:00\", \"%Y-%m-%d %H:%M\"), \"duration\": timedelta(hours=4),\n \"invites\": [self.comms_grp.pk], \"recurrence_interval\": 0, \"description\": \"Christmas party yahoo\",\n \"website_publish\": False}\n resp = client.put('/api/events/christmas-meal', data=update_data, format='json')\n self.assertEqual(resp.status_code, 200)\n event_check = Event.objects.get(title=\"Christmas meal\")\n self.assertEqual(event_check.description, \"Christmas party yahoo\")",
"def test_email_reminders_set_flags(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 19, 0, tzinfo=dt_timezone.utc\n )\n\n # cancellation period starts 2015/2/12 18:00\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 13, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n cancellation_period=24)\n baker.make_recipe(\n 'booking.booking', event=event, paid=True, payment_confirmed=True,\n )\n _add_user_email_addresses(Booking)\n management.call_command('email_reminders')\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(\n Booking.objects.filter(reminder_sent=True).count(), 1\n )",
"def test_no_update_on_data_element(self):\n no_update = self.admitgen.data.attrib['noupdate']\n self.assertEqual(no_update, '1', 'Incorrect noupdate flag')",
"def test_legacy_items_at_day_1(manager):\n manager.update()\n compare_results_attrs(manager.items, fixtures.FIXTURES[1])",
"def test_client_bank_account_partial_update(self):\n pass",
"def update_availability(self):\n print(\"availability triggered\")\n from users.models import Borrowing\n from search.models import Book\n query = Borrowing.objects.filter(start_date__lte=date.today(),end_date__gte=date.today(),rental_validation=True).select_related('book')\n for b in query:\n Book.objects.filter(uuid=b.book.uuid).update(availability=False)",
"def test_austriansettlements_get(self):\n pass",
"def test_custom_attr_change(self):\n custom_attribute_values = [{\n \"custom_attribute_id\": self.cad1.id,\n \"attribute_value\": \"test value\",\n }]\n response = self.api.put(self.assessment, {\n \"custom_attribute_values\": custom_attribute_values\n })\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"[email protected]\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"].keys(), [\"CA1\"])",
"def test_calendarsUpgradeWithTypes(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"wsanchez\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n \"@xattrs\":\n {\n md5Attr: \"12345\",\n },\n },\n \"@xattrs\":\n {\n cTagAttr: \"12345\",\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Pickled XML Doc\n freeBusyAttr: cPickle.dumps(WebDAVDocument.fromString(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\").root_element),\n },\n },\n },\n },\n \"groups\":\n {\n \"managers\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n \"9F\":\n {\n \"F6\":\n {\n \"9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_ipam_services_partial_update(self):\n pass",
"def test_patchorganizations_item(self):\n pass",
"def test_bond_buttons_inactive(self):\n self.assertFalse(PageObject.find_element\n (InterfacesSettings(), 'bond_interfaces').\n is_enabled())\n self.assertFalse(PageObject.find_element\n (InterfacesSettings(), 'unbond_interfaces').\n is_enabled())",
"def test_update_attribute_data(self):\n pass"
]
| [
"0.54488975",
"0.519129",
"0.51017874",
"0.50756145",
"0.5068723",
"0.5064646",
"0.5036777",
"0.50349975",
"0.50123906",
"0.49567854",
"0.49487585",
"0.49486288",
"0.48828766",
"0.48776853",
"0.48565727",
"0.48427084",
"0.4841117",
"0.48325676",
"0.4821195",
"0.4814",
"0.48124582",
"0.48102155",
"0.4804125",
"0.479817",
"0.47745046",
"0.47737613",
"0.47645688",
"0.4758539",
"0.4750764",
"0.47435588"
]
| 0.62855875 | 0 |
Verify that the hierarchy described by "before", when upgraded, matches the hierarchy described by "after". | def verifyDirectoryComparison(self, before, after, reverify=False):
root = self.createHierarchy(before)
config.DocumentRoot = root
config.DataRoot = root
(yield self.doUpgrade(config))
self.assertTrue(self.verifyHierarchy(root, after))
if reverify:
# Ensure that repeating the process doesn't change anything
(yield self.doUpgrade(config))
self.assertTrue(self.verifyHierarchy(root, after)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assertBalanceChange (self, before, changes):\n\n after = self.getBalances ()\n assert_equal (len (before), len (changes))\n assert_equal (after, [before[i] + changes[i] for i in range (len (before))])",
"def _detect_changes(before_states, after_states):\n\n return MigrationAutodetector(\n _make_project_state(before_states), _make_project_state(after_states)\n )._detect_changes()",
"def _compare_children(self, source_children, dest_children, unexpected):\r\n dest_cursor = 0\r\n for child in source_children:\r\n child = child.version_agnostic()\r\n if child.block_id in unexpected:\r\n self.assertNotIn(child.block_id, [dest.block_id for dest in dest_children])\r\n else:\r\n self.assertEqual(child.block_id, dest_children[dest_cursor].block_id)\r\n dest_cursor += 1\r\n self.assertEqual(dest_cursor, len(dest_children))",
"def test_calendarsUpgradeWithOrphans(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"async def on_member_update(before, after):\r\n if Counter(before.roles) == Counter(after.roles):\r\n return\r\n await check_member_rules(after)",
"def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_connection_genes((-1, 0), cfg)\n gene1_en = deepcopy(gene1.enabled)\n gene1_w = deepcopy(gene1.weight)\n gene2_en = deepcopy(gene2.enabled)\n gene2_w = deepcopy(gene2.weight)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.enabled = False\n gene3.weight = -10\n \n # Check for unchanged parents\n self.assertEqual(gene1.enabled, gene1_en)\n self.assertEqual(gene1.weight, gene1_w)\n self.assertEqual(gene2.enabled, gene2_en)\n self.assertEqual(gene2.weight, gene2_w)",
"def test_calendarsUpgradeWithDuplicateOrphans(self):\n\n before = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n \"unknownuser.1\":\n {\n },\n \"unknowngroup.1\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_case34(self):\n \n self.graph1.swapStudents(\"student1\",\"supervisor1\",\"student2\",\"supervisor1\")\n\n result1 = self.graph1.getSupervisors(\"student1\")\n result2 = self.graph1.getSupervisors(\"student2\")\n\n expected1 = ['supervisor1']\n expected2 = ['supervisor1']\n\n self.assertEqual((result1,result2),(expected1,expected2))",
"def test_upgrade(self):\n with cd(self.latest_agent_name):\n latest_agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # upgrade again to check it workd with upgraded version\n with cd(self.agent_name):\n self.runner.invoke( # pylint: disable=no-member\n cli,\n [\"--skip-consistency-check\", \"upgrade\", \"--local\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n agent_items = set(\n ItemRemoveHelper(self.load_mock_context())\n .get_agent_dependencies_with_reverse_dependencies()\n .keys()\n )\n assert latest_agent_items == agent_items\n\n # compare both configuration files, except the agent name and the author\n upgraded_agent_dir = Path(self.agent_name)\n latest_agent_dir = Path(self.latest_agent_name)\n lines_upgraded_agent_config = (\n (upgraded_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n lines_latest_agent_config = (\n (latest_agent_dir / DEFAULT_AEA_CONFIG_FILE).read_text().splitlines()\n )\n # the slice is because we don't compare the agent name and the author name\n assert lines_upgraded_agent_config[2:] == lines_latest_agent_config[2:]\n\n # compare vendor folders.\n assert are_dirs_equal(\n upgraded_agent_dir / \"vendor\", latest_agent_dir / \"vendor\"\n )",
"def test_b(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 >= v2)\n self.assertTrue(v2 >= v1)",
"def test_difference_in_hierarchy(self):\n self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'dog.n.01'), 0))\n self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('mammal.n.01', 'dog.n.01'), 0.9384287))\n self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'mammal.n.01'), -0.9384287))",
"def test_b(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertTrue(v2 <= v1)",
"def test_case33(self):\n \n self.graph1.swapStudents(\"student1\",\"supervisor1\",\"student3\",\"supervisor3\")\n\n result1 = self.graph1.getSupervisors(\"student3\")\n result2 = self.graph1.getSupervisors(\"student1\")\n\n expected1 = ['supervisor1']\n expected2 = ['supervisor3']\n\n self.assertEqual((result1,result2),(expected1,expected2))",
"def check_hierarchy():\n\n pairs = self._pairs[:]\n while pairs:\n transform, _ = pairs.pop()\n expected_parent, _ = pairs[-1] if pairs else (None, None)\n\n if not expected_parent:\n break\n\n # Walk up the hierarchy until you find what\n # is supposed to be the parent.\n #\n # .\n # |--o a /|\\\n # |--o B |\n # |--o c |\n # |--o d |\n # |\n #\n valid = False\n for parent in transform.lineage():\n if parent == expected_parent:\n valid = True\n break\n\n problem = (\n \"%s was not a parent of %s\" % (\n expected_parent, transform)\n )\n\n # Ok, so the prior link isn't a parent, but we\n # also must make it isn't a child of the subsequent\n # link, as that would mean a cycle\n #\n # |\n # |--o a |\n # |--o B |\n # |--o c |\n # |--o d |\n # \\ /\n # `\n if not valid:\n is_child = False\n for child in transform.descendents():\n if child == expected_parent:\n is_child = True\n break\n\n # It's valid if the Maya parent isn't a Ragdoll child\n valid = not is_child\n\n # This flips the problem on its head\n problem = (\n \"%s cannot be a child of %s, that's a cycle\" % (\n expected_parent, transform)\n )\n\n assert valid, problem",
"def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_simple_node_gene(0, cfg)\n gene1_act = deepcopy(gene1.activation)\n gene1_agg = deepcopy(gene1.aggregation)\n gene1_bias = deepcopy(gene1.bias)\n gene2_act = deepcopy(gene2.activation)\n gene2_agg = deepcopy(gene2.aggregation)\n gene2_bias = deepcopy(gene2.bias)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.activation = 'c'\n gene3.aggregation = 'c'\n gene3.bias = -10\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.aggregation, gene1_agg)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.aggregation, gene2_agg)\n self.assertEqual(gene2.bias, gene2_bias)",
"def test_change(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents and deepcopy everything (just to be sure)\n cfg = Config().genome\n gene1, gene2 = get_output_node_gene(0, cfg)\n gene1_act = deepcopy(gene1.activation)\n gene1_agg = deepcopy(gene1.aggregation)\n gene1_bias = deepcopy(gene1.bias)\n gene2_act = deepcopy(gene2.activation)\n gene2_agg = deepcopy(gene2.aggregation)\n gene2_bias = deepcopy(gene2.bias)\n \n # Perform crossover and mutations\n gene3 = gene1.crossover(other=gene2, cfg=cfg, ratio=0.5)\n gene3.activation = 'c'\n gene3.aggregation = 'c'\n gene3.bias = -10\n \n # Check for unchanged parents\n self.assertEqual(gene1.activation, gene1_act)\n self.assertEqual(gene1.aggregation, gene1_agg)\n self.assertEqual(gene1.bias, gene1_bias)\n self.assertEqual(gene2.activation, gene2_act)\n self.assertEqual(gene2.aggregation, gene2_agg)\n self.assertEqual(gene2.bias, gene2_bias)",
"def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 < v2)\n self.assertTrue(v2 < v1)",
"def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 < v2)\n self.assertTrue(v2 < v1)",
"def testBDSupToSub(self):\n self.assertEqual(\n self.sup2Sub,\n self.config.sup2Sub\n )",
"def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)",
"def test_case7(self):\n\n graph3 = self.graph1.merge(self.graph2)\n\n expected = {'supervisor1':['student1','student2','student4'],'supervisor2':['student4','student1','student3'],'supervisor3':['student3','student2']}\n\n result = True\n\n for sup in expected:\n for stu in expected[sup]:\n if not graph3.isEdge(sup,stu):\n result = False\n break\n\n self.assertTrue(result)",
"def test_d(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.2', name='bar')\n\n self.assertFalse(v1 > v2)\n self.assertTrue(v2 > v1)",
"def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")",
"def test_consistency(self):\n def compare_func(obj, node):\n # number of children must be consistent\n self.assertEqual(\n len(obj.children),\n len(obj._children)\n )\n\n # obj.html equals node\n self.assertEqual(obj._html, node)\n\n # coordinates\n self.assertEqual(\n obj._coordinates,\n obj.coordinates,\n self.expected[\"coordinates\"][obj.id or \"document\"]\n )\n\n # confidence\n self.assertAlmostEqual(\n obj.confidence,\n self.expected[\"confidence\"][obj.id or \"document\"]\n )\n\n self.recursively_compare_tree_against_html(compare_func)",
"def test_d(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.2', name='bar')\n\n self.assertTrue(v1 < v2)\n self.assertFalse(v2 < v1)",
"def test_calendarsUpgradeWithError(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n root = self.createHierarchy(before)\n\n config.DocumentRoot = root\n config.DataRoot = root\n\n (yield self.doUpgrade(config))\n\n self.assertTrue(self.verifyHierarchy(root, after))",
"def are_the_same(node_before, node_after) -> bool:\n\n if node_before.algorithm != node_after.algorithm:\n return False\n elif not _is_output_name_same(node_before, node_after):\n return False\n else:\n for attr in interested_attrs:\n if _exists_attr(attr, node_before, node_after) == 1 or \\\n _exists_attr(attr, node_before, node_after) == 2:\n return False\n elif _exists_attr(attr, node_before, node_after) == 12 and \\\n node_before.attributes[attr] != node_after.attributes[attr]:\n return False\n return True",
"def test_equality_function(self):\r\n self.assertFalse(directories_equal(self.version1_nodrafts, self.version0_nodrafts))\r\n self.assertFalse(directories_equal(self.version1_drafts_extra_branch, self.version1_drafts))",
"def test_a(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.2', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertFalse(v2 <= v1)",
"def test_a(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.2', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)"
]
| [
"0.62915796",
"0.611342",
"0.59514964",
"0.5916525",
"0.5836583",
"0.57875043",
"0.57854444",
"0.5783139",
"0.578169",
"0.5724749",
"0.57057667",
"0.5702303",
"0.5697221",
"0.5692152",
"0.5687135",
"0.5671171",
"0.563159",
"0.563159",
"0.5571707",
"0.5537103",
"0.55254525",
"0.551455",
"0.5460627",
"0.5448515",
"0.5447876",
"0.5446832",
"0.5445015",
"0.5441617",
"0.54220784",
"0.54151917"
]
| 0.72657055 | 0 |
The upgrade process should remove unused notification directories in users' calendar homes, as well as the XML files found therein. | def test_removeNotificationDirectories(self):
before = {
"calendars": {
"users": {
"wsanchez": {
"calendar": {
db_basename: {
"@contents": "",
},
},
"notifications": {
"sample-notification.xml": {
"@contents": "<?xml version='1.0'>\n<should-be-ignored />"
}
}
}
}
}
}
after = {
"calendars": {
"__uids__": {
"64": {
"23": {
"6423F94A-6B76-4A3A-815B-D52CFD77935D": {
"calendar": {
db_basename: {
"@contents": "",
},
},
}
}
}
}
},
".calendarserver_version": {
"@contents": "2",
},
}
(yield self.verifyDirectoryComparison(before, after)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _cleanPackageDir(self, *_):\r\n for _, path in self._pkgDir:\r\n os.rmdir(os.path.join(self._rootfs, path))\r\n\r\n assert len(self._containers) == 0",
"def cleanupInstall(self):\n\n os.chdir( os.path.dirname(self.installPath) )\n tryunlink( self.download.tarball )",
"def remove_custom_installation(self):\n\n logger.info(\"Removing old customization\")\n for candidate in os.listdir(self.rundir):\n if candidate not in (\"config\", \"delta\"):\n candidate = os.path.join(self.rundir, candidate)\n try:\n shutil.rmtree(candidate)\n except NotADirectoryError:\n os.remove(candidate)",
"def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join([ '%(releases_path)s/%(release)s' % { 'releases_path':env.releases_path, 'release':release } for release in directories ])\n run('rm -rf %(directories)s' % env)",
"def purge_files(self):\n run_keyword(\"Purge Server Configuration\")\n run_keyword(\"Purge Cache Manager Configuration\")\n # TODO: Probably the only sane way to do this is to call\n # a helper script which runs as root.\n # run_keyword(\"Purge Cache\")\n valid = r'/vicep([a-z]|[a-h][a-z]|i[a-v])$'\n for vicep in glob.glob(\"/vicep*\"):\n if re.match(valid, vicep) and os.path.isdir(vicep):\n run_keyword(\"Purge Directory\", \"%s/AFSIDat\" % vicep)\n run_keyword(\"Purge Directory\", \"%s/Lock\" % vicep)\n for vheader in glob.glob(\"%s/V*.vol\" % vicep):\n run_keyword(\"Sudo\", \"rm -f %s\" % vheader)",
"def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")",
"def remove_packages(self, packages):",
"def clean_all_folder():\n LOGGER.warning('removal of old files has been temporarily disabled')\n # paths_to_clean = CFG.remove_files\n # if paths_to_clean: # pylint: disable=using-constant-test\n # for remove_config in paths_to_clean: # pylint: disable=not-an-iterable\n # name = tuple(remove_config.keys())[0]\n # LOGGER.info(f'processing: {name}')\n # remove_config = remove_config[name]\n # if 'folder' not in remove_config.keys():\n # LOGGER.error(f'missing \"folder\" in {name}')\n # return\n # if 'age' not in remove_config.keys():\n # LOGGER.error(f'missing \"age\" in {name}')\n # return\n # if not os.path.exists(remove_config['folder']):\n # LOGGER.error(f'path does not exist: {remove_config[\"folder\"]}')\n # return\n # _remove_old_files_from_folder(**remove_config)\n # else:\n # LOGGER.debug('no folder to clean')",
"def autodiscover(self):\n\n old_packages = self.list_task_packages()\n\n files = os.listdir(self.tasks_dir)\n for filename in files:\n pkg_dir = os.path.join(self.tasks_dir, filename)\n if os.path.isdir(pkg_dir):\n self.read_task_package(filename)\n old_packages.discard(filename)\n\n for pkg_name in old_packages:\n self.emit('TASK_REMOVED', pkg_name)",
"def cleanup(self):\n\tprint \"clean up on \" + self.dest\n for root, folders, files in os.walk(self.dest):\n for ignore_dir in self.ignore_dirs:\n if ignore_dir in folders:\n folders.remove(ignore_dir)\n\t\t \n for folder in folders:\n backupdir = os.path.join(root,folders)\n sourcedir = bakupdir.replace(destination,source) \n if not os.path.exists(sourcedir):\n trash = backupdir.replace(destination,trash_dir)\n # shutil.move(backupdir, trash)\n print(\"move\",backupdir,\"to\",trash)\n # os.utime(trash, None)\n \n for filename in files:\n checkfile = root + \"/\" + filename\n checkfile = checkfile.replace(self.dest, self.source)\n print(\"checking if \", checkfile, \"exists\")\n if not os.path.exists(checkfile): \n print os.path.join(root,filename)\n\t\t backupfile = checkfile.replace(self.source,self.dest)\n trash = self.trash + checkfile.replace(self.source, \"\")\n # shutil.move(backupfile, trash)\n print(\"move\",backupfile,\"to\",trash)\n # os.utime(trash, None)",
"def clean_configuration_directory():\r\n for locale in CONFIGURATION.translated_locales:\r\n clean_conf_folder(locale)",
"def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")",
"def cleanup():\n download_dir = settings.DOWNLOAD_BASE_DIR\n\n for base, dirs, files in os.walk(download_dir):\n for dir in dirs:\n shutil.rmtree(download_dir + dir)",
"def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)",
"def cleanUpPackage(inProgressFilename, packageFilename, propFilename):\n try:\n for filename in (inProgressFilename, packageFilename, propFilename):\n if (filename is not None and os.path.exists(filename)):\n os.remove(filename)\n\n except OSError, osErr :\n LOG.error('Unable to cleanup Package (%s)' % osErr)",
"def delete_old_notifications():\n\n try:\n from common.models import (InvenTreeSetting, NotificationEntry,\n NotificationMessage)\n\n days = InvenTreeSetting.get_setting('INVENTREE_DELETE_NOTIFICATIONS_DAYS', 30)\n threshold = timezone.now() - timedelta(days=days)\n\n items = NotificationEntry.objects.filter(\n updated__lte=threshold\n )\n\n if items.count() > 0:\n logger.info(f\"Deleted {items.count()} old notification entries\")\n items.delete()\n\n items = NotificationMessage.objects.filter(\n creation__lte=threshold\n )\n\n if items.count() > 0:\n logger.info(f\"Deleted {items.count()} old notification messages\")\n items.delete()\n\n except AppRegistryNotReady:\n logger.info(\"Could not perform 'delete_old_notifications' - App registry not ready\")",
"def cleanup(self):\n\n # uninstall sourcedata\n if self.conversion.install_dataset_path.exists():\n # without the ChangeWorkingDir the command does not operate inside\n # of dataset_path\n with utils.ChangeWorkingDir(self.dataset_path):\n datalad.uninstall(\n path=self.conversion.install_dataset_name,\n dataset=self.dataset_path,\n recursive=True\n )\n\n # remove bids conversion\n bids_dir = self._get_bids_dir()\n if bids_dir.exists():\n self.log.info(\"Remove %s\", bids_dir)\n shutil.rmtree(bids_dir)",
"def clean_directory():\n print(\"INFO: Cleaning old files...\")\n if os.path.exists(os.path.join(os.path.dirname(__file__), 'Scripts')):\n try:\n shutil.rmtree(os.path.join(os.path.dirname(__file__), 'Scripts'))\n except OSError as error:\n print(\"Error: %s - %s.\" % (error.filename, error.strerror))",
"def cleanStamps(self, criteria):\n dirEmpty = True\n for s in self.iterStamps():\n if criteria(s):\n os.unlink(self.getFile(s))\n else:\n dirEmpty = False\n try:\n os.rmdir(self.path)\n except OSError:\n pass",
"def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()",
"def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()",
"def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()",
"def clean_up_steps(path: str):\n try:\n for step in os.listdir(path):\n if step.endswith('.xml'):\n os.remove(os.path.join(path, step))\n except:\n print(\"ERROR: failed to clean up steps folder.\")",
"def removeEmpties(self,name):\n empties = set()\n projectDir = dirs['installers'].join(name)\n for asDir,sDirs,sFiles in os.walk(projectDir.s):\n if not (sDirs or sFiles): empties.add(GPath(asDir))\n for empty in empties: empty.removedirs()\n projectDir.makedirs() #--In case it just got wiped out.",
"def purge_downloaded_files():\n for fpath in DOWNLOADED_FILEPATHS:\n if os.path.exists(fpath):\n os.remove(fpath)",
"def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)",
"def teardown(self):\n folder = os.path.join(expanduser('~'), '.drupdates', 'plugins')\n if os.path.isdir(folder):\n shutil.rmtree(folder)",
"def unlink(self):\n\t\tadiff = ApplicationDifferencer()\n\n\t\t# Determine the differences between what's in the\n\t\t# application's directory and what's currently\n\t\t# available from the root filesystem (in relation\n\t\t# to this application).\n\t\tresults = adiff.scan(\n\t\t\t\tos.path.join(\n\t\t\t\t\tAppFolders.get(self.type),\n\t\t\t\t\tself.name + \"/\" + self.version\n\t\t\t\t\t),\n\t\t\t\tTrue\n\t\t\t\t);\n\t\t\n\t\tsafe_app_dir = os.path.join(\n AppFolders.get(self.type),\n self.name # We exclude the version here because we could be\n # checking against a link that's under Current or\n # a specific version.\n )\n\t\t\n\t\t# Preemptively go through the list of directories, removing those\n\t\t# that are symlinks to the application folder. This is from the legacy\n\t\t# link system and unfortunatly if you let the block below this run\n\t\t# through a system with said symlinks, you'll end up annihilating the\n\t\t# the application files (because it'll walk through the symlink into\n\t\t# the application directory and start rm'ing stuff we don't want to)\n\t\t# The solution here is to go through and remove directory symlinks before\n\t\t# hand, with a reversed result list (in effect reversing the walk process\n\t\t# in adiff.scan) so that we elimate the top level symlinks first, preventing\n\t\t# it from annihilating symlinked directories inside the application folder.\n\t\t# Very annoying stuff.\n\t\t#\n\t\t# XXX: I almost hosed the entire Elementary system with this. Apparently it\n\t\t# that removing symlinked directories included some of the base ones\n\t\t# such as /lib and /bin (because the Python install contains those dirs\n\t\t# too :P). The only_sub variable defines that only paths that resolve\n\t\t# to a *subdirectory* of those specified can be removed if it's a symlinked\n\t\t# directory. This prevents removal of /bin, /lib, etc.. symlinks.\n\t\t#\n\t\tonly_sub = [\n\t\t\t\t\"/System/Utilities/Applications\",\n\t\t\t\t\"/System/Utilities/Libraries\",\n\t\t\t\t\"/Applications\",\n\t\t\t\t\"/Users\"\n\t\t\t]\n\t\tresults.reverse()\n\t\ttrip_safety = False\n\t\tfor i in results:\n\t\t\t# Legacy removal is a special case because directories will be detected\n\t\t\t# as file entries (because they are symlinks). Therefore, we need to use\n\t\t\t# os.path.realpath and os.path.isdir to find out whether it's really a directory\n\t\t\t# or not.\n\t\t\tis_directory = os.path.isdir(os.path.realpath(i[2]))\n\n\t\t\t# Get file information.\n\t\t\ttry:\n\t\t\t\tpstat = os.lstat(i[2])[stat.ST_MODE]\n\t\t\texcept:\n\t\t\t\t# Likely broken when we removed a directory symlink.\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Determine whether we should proceed with this entry.\n\t\t\tif (not is_directory):\n\t\t\t\tcontinue\n\t\t\tif (not stat.S_ISLNK(pstat)):\n\t\t\t\tcontinue\n\n\t\t\t# Determine whether it's safe to remove this symlinked dir.\n\t\t\tif (not self.isApplicationOwned(i[2], safe_app_dir)):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Double-check before we go unlinking (in case of a logic oversight).\n\t\t\tif (is_directory and stat.S_ISLNK(pstat)):\n\t\t\t\ttrip_safety = True\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_unlink(i[2])\n\t\t\t\t\tlog.showWarningW(\"Removed symlinked directory at: \" + i[2])\n\t\t\t\t\tlog.showWarningW(\"The full path was: \" + rpath)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\tresults.reverse()\t\t\n\n\t\tif (trip_safety):\n\t\t\tlog.showErrorW(\"Legacy system safety switch was tripped. 
This indicates you have\")\n\t\t\tlog.showErrorO(\"symlinked directories on your system (from legacy linkage systems).\")\n\t\t\tlog.showErrorO(\"The unlinking process has removed at least one of those symlinked\")\n\t\t\tlog.showErrorO(\"directories. In order to make sure application files don't get\")\n\t\t\tlog.showErrorO(\"removed, you need to run the unlink process again to ensure the system\")\n\t\t\tlog.showErrorO(\"is scanned without symlinked directories. If the process shows this\")\n\t\t\tlog.showErrorO(\"message twice, then STOP and REMOVE THE SYMLINKS MANUALLY. You risk\")\n\t\t\tlog.showErrorO(\"destroying application installations if you continue.\")\n\t\t\tsys.exit(1)\n\t\t\n\n\t\t# Now go through the results, removing directories (if they're\n\t\t# empty) and un-symlinking files (but making sure that we only\n\t\t# remove symlinks and not normal files).\n\t\tattempt_successes = list()\n\t\tattempt_failures = list()\n\t\tattempt_notexists = list()\n\t\ttotal_files = 0\n\t\tfor i in results:\n\t\t\ttotal_files += 1\n\t\t\ttry:\n\t\t\t\tpstat = os.lstat(i[2])[stat.ST_MODE]\n\t\t\texcept:\n\t\t\t\t# File doesn't exist. Likely got removed while we unlinked\n\t\t\t\t# a `symlinked' directory (from old linkage system).\n\t\t\t\tcontinue\n\n\t\t\t# Check to make sure that the file we're going to remove is located\n\t\t\t# within a safe directory.\n\t\t\tif (not self.isApplicationOwned(i[2], safe_app_dir)):\n\t\t\t\t# This check only applies to symlinks, not real directories.\n\t\t\t\tif ((i[0] == \"file\" or i[0] == \"directory\") and stat.S_ISLNK(pstat)):\n\t\t\t\t\tlog.showInfoW(\"Ignoring \" + i[2] + \" because it's not owned by the application.\")\n\t\t\t\t\tcontinue\n\n\t\t\tif (i[0] == \"directory\" and not stat.S_ISLNK(pstat)):\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_rmdir(i[2])\n\t\t\t\t\tattempt_successes.append(i[2])\n\t\t\t\texcept:\n\t\t\t\t\tlog.showInfoW(\"Still in use: \" + i[2])\n\t\t\t\t\t# Failure to remove a directory should not be counted\n\t\t\t\t\t# as a failure since quite often directories will not be\n\t\t\t\t\t# removed because they are still in use by other applications.\n\t\t\t\t\t#attempt_failures.append(i[2])\n\t\t\telif ((i[0] == \"file\" or i[0] == \"directory\") and stat.S_ISLNK(pstat)):\n\t\t\t\ttry:\n\t\t\t\t\tself.oper_unlink(i[2])\n\t\t\t\t\tattempt_successes.append(i[2])\n\t\t\t\texcept:\n\t\t\t\t\tlog.showErrorW(\"Unable to symlink file \" + i[2])\n\t\t\t\t\tattempt_failures.append(i[2])\n\t\t\telif (i[0] == \"notexists\"):\n\t\t\t\tlog.showInfoW(\" N \" + i[2])\n\t\t\t\tattempt_notexists.append(i[2])\n\t\t\telif (i[0] != \"notexists\" and i[0] != \"file\" and i[0] != \"directory\"):\n\t\t\t\tlog.showWarningW(\"Unknown operation for \" + i[1])\n\n\t\treturn attempt_successes, attempt_failures, total_files",
"def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')",
"def remove_old_ftp_downloads(folder):\r\n date_now = datetime.datetime.utcnow()\r\n all_paths = glob(os.path.join(folder,'Runoff*netcdf*'))\r\n for path in all_paths:\r\n\tdate_file = datetime.datetime.strptime(os.path.basename(path).split('.')[1],'%Y%m%d')\r\n if os.path.isdir(path):\r\n rmtree(path)\r\n else:\r\n os.remove(path)\r\n\tif date_now - date_file < datetime.timedelta(1):\r\n\t os.mkdir(path)"
]
| [
"0.6124525",
"0.6064273",
"0.5862961",
"0.58584833",
"0.5794497",
"0.5785999",
"0.57849133",
"0.5784629",
"0.57541484",
"0.57509446",
"0.57488537",
"0.57299966",
"0.5702775",
"0.5690832",
"0.5685405",
"0.56484056",
"0.56467074",
"0.56436086",
"0.5637158",
"0.5624792",
"0.56211054",
"0.5611384",
"0.5606979",
"0.55861807",
"0.5543179",
"0.554291",
"0.5542219",
"0.5541519",
"0.5540849",
"0.5516368"
]
| 0.65725094 | 0 |
Verify that calendar homes in the /calendars/<type>/<shortname>/ form whose records don't exist are moved into dataroot/archived/ | def test_calendarsUpgradeWithOrphans(self):
before = {
"calendars":
{
"users":
{
"unknownuser":
{
},
},
"groups":
{
"unknowngroup":
{
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
"archived":
{
"unknownuser":
{
},
"unknowngroup":
{
},
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_calendarsUpgradeWithDuplicateOrphans(self):\n\n before = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n \"unknownuser.1\":\n {\n },\n \"unknowngroup.1\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithUnknownFiles(self):\n\n ignoredUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"garbage.ics\": {\n \"@contents\": \"Oops, not actually an ICS file.\",\n },\n \"other-file.txt\": {\n \"@contents\": \"Also not a calendar collection.\"\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": ignoredUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": ignoredUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithError(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n root = self.createHierarchy(before)\n\n config.DocumentRoot = root\n config.DataRoot = root\n\n (yield self.doUpgrade(config))\n\n self.assertTrue(self.verifyHierarchy(root, after))",
"def test_calendarsUpgradeWithNestedCollections(self):\n\n beforeUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n afterUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \".collection.nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": beforeUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": afterUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithNoChange(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def validate_archive(self):\n\t\t# refresh the archive pool searches\n\t\tself.pool_search(refresh=True)\n\t\tif self.status == 'archived':\n\t\t\treturn True\n\t\treturn False",
"def test_directory_path_with_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=True)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (with-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_noTimedEventsExtraction(self):\n userbase.extractUserStore(self.account, self.destdir)\n self.assertEqual(\n self.ls.accountByAddress(self.localpart, self.domain),\n None)\n\n self.assertFalse(list(self.store.query(SubStore, SubStore.storepath == self.origdir)))\n self.origdir.restat(False)\n self.assertFalse(self.origdir.exists())\n self.assertFalse(list(self.store.query(_SubSchedulerParentHook)))",
"def ____test_sys_archived(self):\n # TODO archinging not working\n with ts.SetupDbAndCredentials() as s:\n args = [\"--archived\", \"--skip-albums\", \"--start-date\", \"2017-01-01\"]\n s.test_setup(\n \"test_sys_archived\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n db = LocalData(s.root)\n\n # Total of 1 out of media items\n db.cur.execute(\"SELECT COUNT() FROM SyncFiles\")\n count = db.cur.fetchone()\n self.assertEqual(1, count[0])",
"def test_calendarsUpgradeWithInboxItems(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"oldinboxitem\": {\n \"@contents\": \"\",\n \"@timestamp\": 1, # really old file\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"inboxitems.txt\":\n {\n \"@contents\": None, # ignore contents, the paths inside are random test directory paths\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def test_directory_path_without_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=False)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (without-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_archived(self):\n doc = DocumentFactory(title=u'impalas', locale=u'en-US', is_archived=True)\n ApprovedRevisionFactory(document=doc, summary=u'impalas', is_approved=True)\n\n self.refresh()\n\n # include_archived gets the above document\n qs = {'q': 'impalas', 'a': 1, 'w': 1, 'format': 'json',\n 'include_archived': 'on'}\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_(1, len(results))\n\n # no include_archived gets you nothing since the only\n # document in the index is archived\n qs = {'q': 'impalas', 'a': 0, 'w': 1, 'format': 'json'}\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_(0, len(results))",
"def test_delete_all_archives_failure(self):\n support = saltsupport.SaltSupportModule()\n support.archives = MagicMock(\n return_value=[\n \"/mnt/storage/one-support-000-000.bz2\",\n \"/mnt/storage/two-support-111-111.bz2\",\n \"/mnt/storage/three-support-222-222.bz2\",\n ]\n )\n ret = support.delete_archives()\n assert \"files\" in ret\n assert \"errors\" in ret\n assert bool(ret[\"errors\"])\n assert bool(ret[\"files\"])\n assert isinstance(ret[\"errors\"], dict)\n assert isinstance(ret[\"files\"], dict)\n\n assert ret[\"files\"][\"/mnt/storage/three-support-222-222.bz2\"] == \"removed\"\n assert ret[\"files\"][\"/mnt/storage/one-support-000-000.bz2\"] == \"left\"\n assert ret[\"files\"][\"/mnt/storage/two-support-111-111.bz2\"] == \"left\"\n\n assert len(ret[\"errors\"]) == 2\n assert (\n ret[\"errors\"][\"/mnt/storage/one-support-000-000.bz2\"]\n == \"Decreasing electron flux\"\n )\n assert (\n ret[\"errors\"][\"/mnt/storage/two-support-111-111.bz2\"]\n == \"Solar flares interference\"\n )",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def dwnlds_archived(self, replace_path=None, new_path=None):\n raise EODataDownException(\"Not implemented.\")",
"def check_delete_calendar_list_in_list(model_list, calendar_id):\n log_info(\"Compare delete calendar list.\\nList CalendarID:\\n{model_list}\\nExpected CalendarID:\\n{calendar_id}\"\n .format(model_list='\\n'.join(str(item.cal_id) for item in model_list), calendar_id=calendar_id))\n for item in model_list:\n if item.cal_id == calendar_id:\n assert False, \"Current calendar id is presented\"",
"def check_articles_to_archive():\n from crawler.core.models import Article\n # Filter articles that can't be archived\n articles = Article.objects.all().should_be_archived()\n articles = articles.should_be_archived()\n # queryset of articles needing immediate archiving\n archive_articles_qs = detect_notfound(\n articles.not_found_only_tagged()\n ).union(\n articles.release_date_tagged()\n ).union(\n articles.priority_tagged()\n )\n\n archive_articles.apply_async(\n ids=list(set(archive_articles_qs.values_list('pk'))),\n skip_filter=True\n )",
"def test_ftw_journal_is_not_versioned_archeologist(self):\n self.login(self.regular_user)\n\n self.create_version(self.document)\n\n repo_tool = api.portal.get_tool('portal_repository')\n shadow_history = repo_tool.getHistoryMetadata(self.document)\n self.assertEquals(2, len(shadow_history))\n\n for version_number in range(len(shadow_history)):\n archeologist = Archeologist(\n self.document, repo_tool.retrieve(\n self.document, selector=version_number))\n\n archived_obj = archeologist.excavate()\n archived_ann = IAnnotations(archived_obj)\n self.assertNotIn(self.JOURNAL_KEY, archived_ann)",
"def test_removeNotificationDirectories(self):\n\n before = {\n \"calendars\": {\n \"users\": {\n \"wsanchez\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"notifications\": {\n \"sample-notification.xml\": {\n \"@contents\": \"<?xml version='1.0'>\\n<should-be-ignored />\"\n }\n }\n }\n }\n }\n }\n\n after = {\n \"calendars\": {\n \"__uids__\": {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n }\n }\n }\n }\n },\n \".calendarserver_version\": {\n \"@contents\": \"2\",\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def test_admin_calendar_admin_list(self):\n response = self.client.get(\"/admin/appointment/calendar/\")\n self.assertEqual(response.status_code, 200)",
"def check_date(dates):\n\n # Loads file list from raw and processed data dirs\n data_dir = basedir + '/app/static/data/saved'\n data_files = [f for f in os.listdir(data_dir) if not f.startswith('.DS')]\n\n print data_files\n\n raw_data_dir = basedir + '/app/static/data/raw'\n raw_data_files = [f for f in os.listdir(raw_data_dir) if not f.startswith('.DS')]\n\n # If neither a raw or processed file exists, we haven't collected it\n # Sorts these uncollected files by date for reference\n if data_files or raw_data_files:\n saved_dates = []\n raw_dates = []\n\n if data_files:\n saved_dates = [data_file.split('.')[0] for data_file in data_files]\n saved_dates = sorted(saved_dates, reverse=True)\n if raw_data_files:\n raw_dates = [raw_file.split('.')[0] for raw_file in raw_data_files]\n raw_dates = sorted(raw_dates, reverse=True)\n\n uncrawled_dates = []\n for date in dates:\n if date not in saved_dates and date not in raw_dates:\n uncrawled_dates.append(date)\n else:\n uncrawled_dates = dates\n\n print 'Uncrawled file dates'\n print uncrawled_dates\n print ''\n\n return uncrawled_dates",
"def test_enter_without_dirs(self):\n archive = DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR)\n\n self.assertEqual(archive.user, self.user)\n self.assertEqual(archive.working_dir_path, DATA_DOWNLOADS_WORKING_DIR)\n\n self.assertIsNone(archive.tmp_dir_path)\n self.assertIsNone(archive.data_dir_path)",
"def cleanup(self):\n\tprint \"clean up on \" + self.dest\n for root, folders, files in os.walk(self.dest):\n for ignore_dir in self.ignore_dirs:\n if ignore_dir in folders:\n folders.remove(ignore_dir)\n\t\t \n for folder in folders:\n backupdir = os.path.join(root,folders)\n sourcedir = bakupdir.replace(destination,source) \n if not os.path.exists(sourcedir):\n trash = backupdir.replace(destination,trash_dir)\n # shutil.move(backupdir, trash)\n print(\"move\",backupdir,\"to\",trash)\n # os.utime(trash, None)\n \n for filename in files:\n checkfile = root + \"/\" + filename\n checkfile = checkfile.replace(self.dest, self.source)\n print(\"checking if \", checkfile, \"exists\")\n if not os.path.exists(checkfile): \n print os.path.join(root,filename)\n\t\t backupfile = checkfile.replace(self.source,self.dest)\n trash = self.trash + checkfile.replace(self.source, \"\")\n # shutil.move(backupfile, trash)\n print(\"move\",backupfile,\"to\",trash)\n # os.utime(trash, None)",
"def cleanup(self, archive, files):\n mtime = self.test(archive, files)\n backup_home = os.path.join(self.download_dir, '-')\n if not os.path.exists(backup_home):\n os.makedirs(backup_home)\n backup_dir = tempfile.mkdtemp('', datetime.utcnow().strftime(\"%Y-%m-%d_\"), backup_home)\n for file in files:\n os.makedirs(os.path.join(backup_dir, file))\n if os.path.getmtime(file) != mtime[file]:\n raise RuntimeError(\"Failed to cleanup archived data: %s has been modified.\" % file)\n os.rename(file, os.path.join(backup_dir, file))\n self.log.debug(\"Moved %s to %s\" % (file, os.path.join(backup_dir, file)))\n return",
"def obter_pastas_inexistentes_no_destino(self):\n \n #laço em todas as subpasta da pasta de origem\n for pasta in self.sub_pastas_nao_finalizadas:\n \n #forma um path da pasta de destino com a subpasta da vez\n pasta_destino = os.path.join(self.pasta_destino.obter_caminho(),\\\n os.path.basename(pasta))\n \n #verifica se a pasta de destino já existe, senão existir, inclui\n #na lista da operações para criação futura.\n if not os.path.exists(pasta_destino):\n self.__class__.operacoes_criar_pastas.append(pasta_destino)",
"def test_calendarsUpgradeWithUIDs(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Plain XML\n freeBusyAttr: \"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\",\n },\n },\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"@xattrs\":\n {\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_history_import_relpath_in_archive():\n dest_parent = mkdtemp()\n with HistoryArchive(arcname_prefix='../insecure') as history_archive:\n\n history_archive.write_metafiles()\n history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')\n history_archive.finalize()\n _run_unpack(history_archive, dest_parent, 'Relative parent path in import archive allowed')",
"def test_sys_album_add_file(self):\n with ts.SetupDbAndCredentials() as s:\n args = [\"--start-date\", \"2017-09-19\", \"--end-date\", \"2017-09-20\"]\n s.test_setup(\n \"test_sys_album_add_file\", args=args, trash_db=True, trash_files=True\n )\n s.gp.start(s.parsed_args)\n\n pat = str(albums_root / \"2017\" / \"0923 Clones😀\" / \"*.*\")\n files = sorted(s.root.glob(pat))\n self.assertEqual(4, len(files))\n\n # spoof the album to pretend it only got 3 files up to 2017-09-20\n db = LocalData(s.root)\n db.cur.execute(\n \"UPDATE Albums SET EndDate='2017-09-20',\"\n \"Size=3 WHERE \"\n \"AlbumName='Clones😀'\"\n )\n db.store()\n\n args = [\n \"--start-date\",\n \"2017-09-19\",\n \"--end-date\",\n \"2017-09-23\",\n \"--index-only\",\n ]\n s.__exit__()\n s.test_setup(\"test_sys_album_add_file\", args=args)\n s.gp.start(s.parsed_args)\n\n # the rescan will reset the date so set it back\n db = LocalData(s.root)\n db.cur.execute(\n \"UPDATE Albums SET EndDate='2017-09-20' \" \"WHERE AlbumName='Clones😀'\"\n )\n db.store()\n\n args = [\"--skip-index\", \"--skip-files\"]\n s.__exit__()\n s.test_setup(\"test_sys_album_add_file\", args=args)\n s.gp.start(s.parsed_args)\n\n pat = str(albums_root / \"2017\" / \"0920 Clones😀\" / \"*.*\")\n files = sorted(s.root.glob(pat))\n self.assertEqual(4, len(files))\n should_be_gone = s.root / albums_root / \"2017\" / \"0923 Clones😀\"\n self.assertFalse(should_be_gone.exists())\n\n # test --album-date-by-first-photo\n\n # force re-download of the album\n db.cur.execute(\n \"UPDATE Albums SET Downloaded=0 \" \"WHERE AlbumName='Clones😀'\"\n )\n db.store()\n args = [\"--skip-index\", \"--skip-files\", \"--album-date-by-first-photo\"]\n s.__exit__()\n s.test_setup(\"test_sys_album_add_file\", args=args)\n s.gp.start(s.parsed_args)\n\n pat = str(albums_root / \"2017\" / \"0919 Clones😀\" / \"*.*\")\n files = sorted(s.root.glob(pat))\n self.assertEqual(4, len(files))\n\n should_be_gone = s.root / albums_root.absolute() / \"2017\" / \"0920 Clones😀\"\n self.assertFalse(should_be_gone.exists())",
"def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def check_missing_unique_link():\n from mspray.apps.main.utils import queryset_iterator\n\n queryset = SprayDay.objects.filter(spraypoint__isnull=True).only(\n \"pk\", \"location_id\"\n )\n for record in queryset_iterator(queryset):\n add_unique_record(record.pk, record.location_id)\n gc.collect()"
]
| [
"0.6892958",
"0.64702445",
"0.59006584",
"0.5871715",
"0.5591878",
"0.55893683",
"0.5515116",
"0.5494797",
"0.53261685",
"0.52998716",
"0.52600235",
"0.5235838",
"0.5230849",
"0.52290356",
"0.51895154",
"0.5150668",
"0.5131935",
"0.51303554",
"0.50978",
"0.5095233",
"0.5077469",
"0.5073746",
"0.5069748",
"0.5050241",
"0.5013479",
"0.5007482",
"0.49862877",
"0.49739766",
"0.4960516",
"0.4957424"
]
| 0.7058807 | 0 |
Verify that calendar homes in the /calendars/<type>/<shortname>/ form whose records don't exist are moved into dataroot/archived/ | def test_calendarsUpgradeWithDuplicateOrphans(self):
before = {
"archived":
{
"unknownuser":
{
},
"unknowngroup":
{
},
},
"calendars":
{
"users":
{
"unknownuser":
{
},
},
"groups":
{
"unknowngroup":
{
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
"archived":
{
"unknownuser":
{
},
"unknowngroup":
{
},
"unknownuser.1":
{
},
"unknowngroup.1":
{
},
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_calendarsUpgradeWithOrphans(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithUnknownFiles(self):\n\n ignoredUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"garbage.ics\": {\n \"@contents\": \"Oops, not actually an ICS file.\",\n },\n \"other-file.txt\": {\n \"@contents\": \"Also not a calendar collection.\"\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": ignoredUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": ignoredUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithError(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n root = self.createHierarchy(before)\n\n config.DocumentRoot = root\n config.DataRoot = root\n\n (yield self.doUpgrade(config))\n\n self.assertTrue(self.verifyHierarchy(root, after))",
"def test_calendarsUpgradeWithNestedCollections(self):\n\n beforeUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n afterUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \".collection.nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": beforeUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": afterUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithNoChange(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def validate_archive(self):\n\t\t# refresh the archive pool searches\n\t\tself.pool_search(refresh=True)\n\t\tif self.status == 'archived':\n\t\t\treturn True\n\t\treturn False",
"def test_directory_path_with_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=True)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (with-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_noTimedEventsExtraction(self):\n userbase.extractUserStore(self.account, self.destdir)\n self.assertEqual(\n self.ls.accountByAddress(self.localpart, self.domain),\n None)\n\n self.assertFalse(list(self.store.query(SubStore, SubStore.storepath == self.origdir)))\n self.origdir.restat(False)\n self.assertFalse(self.origdir.exists())\n self.assertFalse(list(self.store.query(_SubSchedulerParentHook)))",
"def ____test_sys_archived(self):\n # TODO archinging not working\n with ts.SetupDbAndCredentials() as s:\n args = [\"--archived\", \"--skip-albums\", \"--start-date\", \"2017-01-01\"]\n s.test_setup(\n \"test_sys_archived\", args=args, trash_files=True, trash_db=True\n )\n s.gp.start(s.parsed_args)\n\n db = LocalData(s.root)\n\n # Total of 1 out of media items\n db.cur.execute(\"SELECT COUNT() FROM SyncFiles\")\n count = db.cur.fetchone()\n self.assertEqual(1, count[0])",
"def test_calendarsUpgradeWithInboxItems(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"oldinboxitem\": {\n \"@contents\": \"\",\n \"@timestamp\": 1, # really old file\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"inboxitems.txt\":\n {\n \"@contents\": None, # ignore contents, the paths inside are random test directory paths\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def test_directory_path_without_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=False)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (without-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_archived(self):\n doc = DocumentFactory(title=u'impalas', locale=u'en-US', is_archived=True)\n ApprovedRevisionFactory(document=doc, summary=u'impalas', is_approved=True)\n\n self.refresh()\n\n # include_archived gets the above document\n qs = {'q': 'impalas', 'a': 1, 'w': 1, 'format': 'json',\n 'include_archived': 'on'}\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_(1, len(results))\n\n # no include_archived gets you nothing since the only\n # document in the index is archived\n qs = {'q': 'impalas', 'a': 0, 'w': 1, 'format': 'json'}\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_(0, len(results))",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def test_delete_all_archives_failure(self):\n support = saltsupport.SaltSupportModule()\n support.archives = MagicMock(\n return_value=[\n \"/mnt/storage/one-support-000-000.bz2\",\n \"/mnt/storage/two-support-111-111.bz2\",\n \"/mnt/storage/three-support-222-222.bz2\",\n ]\n )\n ret = support.delete_archives()\n assert \"files\" in ret\n assert \"errors\" in ret\n assert bool(ret[\"errors\"])\n assert bool(ret[\"files\"])\n assert isinstance(ret[\"errors\"], dict)\n assert isinstance(ret[\"files\"], dict)\n\n assert ret[\"files\"][\"/mnt/storage/three-support-222-222.bz2\"] == \"removed\"\n assert ret[\"files\"][\"/mnt/storage/one-support-000-000.bz2\"] == \"left\"\n assert ret[\"files\"][\"/mnt/storage/two-support-111-111.bz2\"] == \"left\"\n\n assert len(ret[\"errors\"]) == 2\n assert (\n ret[\"errors\"][\"/mnt/storage/one-support-000-000.bz2\"]\n == \"Decreasing electron flux\"\n )\n assert (\n ret[\"errors\"][\"/mnt/storage/two-support-111-111.bz2\"]\n == \"Solar flares interference\"\n )",
"def dwnlds_archived(self, replace_path=None, new_path=None):\n raise EODataDownException(\"Not implemented.\")",
"def check_delete_calendar_list_in_list(model_list, calendar_id):\n log_info(\"Compare delete calendar list.\\nList CalendarID:\\n{model_list}\\nExpected CalendarID:\\n{calendar_id}\"\n .format(model_list='\\n'.join(str(item.cal_id) for item in model_list), calendar_id=calendar_id))\n for item in model_list:\n if item.cal_id == calendar_id:\n assert False, \"Current calendar id is presented\"",
"def check_articles_to_archive():\n from crawler.core.models import Article\n # Filter articles that can't be archived\n articles = Article.objects.all().should_be_archived()\n articles = articles.should_be_archived()\n # queryset of articles needing immediate archiving\n archive_articles_qs = detect_notfound(\n articles.not_found_only_tagged()\n ).union(\n articles.release_date_tagged()\n ).union(\n articles.priority_tagged()\n )\n\n archive_articles.apply_async(\n ids=list(set(archive_articles_qs.values_list('pk'))),\n skip_filter=True\n )",
"def test_ftw_journal_is_not_versioned_archeologist(self):\n self.login(self.regular_user)\n\n self.create_version(self.document)\n\n repo_tool = api.portal.get_tool('portal_repository')\n shadow_history = repo_tool.getHistoryMetadata(self.document)\n self.assertEquals(2, len(shadow_history))\n\n for version_number in range(len(shadow_history)):\n archeologist = Archeologist(\n self.document, repo_tool.retrieve(\n self.document, selector=version_number))\n\n archived_obj = archeologist.excavate()\n archived_ann = IAnnotations(archived_obj)\n self.assertNotIn(self.JOURNAL_KEY, archived_ann)",
"def test_admin_calendar_admin_list(self):\n response = self.client.get(\"/admin/appointment/calendar/\")\n self.assertEqual(response.status_code, 200)",
"def test_removeNotificationDirectories(self):\n\n before = {\n \"calendars\": {\n \"users\": {\n \"wsanchez\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"notifications\": {\n \"sample-notification.xml\": {\n \"@contents\": \"<?xml version='1.0'>\\n<should-be-ignored />\"\n }\n }\n }\n }\n }\n }\n\n after = {\n \"calendars\": {\n \"__uids__\": {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n }\n }\n }\n }\n },\n \".calendarserver_version\": {\n \"@contents\": \"2\",\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def check_date(dates):\n\n # Loads file list from raw and processed data dirs\n data_dir = basedir + '/app/static/data/saved'\n data_files = [f for f in os.listdir(data_dir) if not f.startswith('.DS')]\n\n print data_files\n\n raw_data_dir = basedir + '/app/static/data/raw'\n raw_data_files = [f for f in os.listdir(raw_data_dir) if not f.startswith('.DS')]\n\n # If neither a raw or processed file exists, we haven't collected it\n # Sorts these uncollected files by date for reference\n if data_files or raw_data_files:\n saved_dates = []\n raw_dates = []\n\n if data_files:\n saved_dates = [data_file.split('.')[0] for data_file in data_files]\n saved_dates = sorted(saved_dates, reverse=True)\n if raw_data_files:\n raw_dates = [raw_file.split('.')[0] for raw_file in raw_data_files]\n raw_dates = sorted(raw_dates, reverse=True)\n\n uncrawled_dates = []\n for date in dates:\n if date not in saved_dates and date not in raw_dates:\n uncrawled_dates.append(date)\n else:\n uncrawled_dates = dates\n\n print 'Uncrawled file dates'\n print uncrawled_dates\n print ''\n\n return uncrawled_dates",
"def test_enter_without_dirs(self):\n archive = DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR)\n\n self.assertEqual(archive.user, self.user)\n self.assertEqual(archive.working_dir_path, DATA_DOWNLOADS_WORKING_DIR)\n\n self.assertIsNone(archive.tmp_dir_path)\n self.assertIsNone(archive.data_dir_path)",
"def cleanup(self):\n\tprint \"clean up on \" + self.dest\n for root, folders, files in os.walk(self.dest):\n for ignore_dir in self.ignore_dirs:\n if ignore_dir in folders:\n folders.remove(ignore_dir)\n\t\t \n for folder in folders:\n backupdir = os.path.join(root,folders)\n sourcedir = bakupdir.replace(destination,source) \n if not os.path.exists(sourcedir):\n trash = backupdir.replace(destination,trash_dir)\n # shutil.move(backupdir, trash)\n print(\"move\",backupdir,\"to\",trash)\n # os.utime(trash, None)\n \n for filename in files:\n checkfile = root + \"/\" + filename\n checkfile = checkfile.replace(self.dest, self.source)\n print(\"checking if \", checkfile, \"exists\")\n if not os.path.exists(checkfile): \n print os.path.join(root,filename)\n\t\t backupfile = checkfile.replace(self.source,self.dest)\n trash = self.trash + checkfile.replace(self.source, \"\")\n # shutil.move(backupfile, trash)\n print(\"move\",backupfile,\"to\",trash)\n # os.utime(trash, None)",
"def cleanup(self, archive, files):\n mtime = self.test(archive, files)\n backup_home = os.path.join(self.download_dir, '-')\n if not os.path.exists(backup_home):\n os.makedirs(backup_home)\n backup_dir = tempfile.mkdtemp('', datetime.utcnow().strftime(\"%Y-%m-%d_\"), backup_home)\n for file in files:\n os.makedirs(os.path.join(backup_dir, file))\n if os.path.getmtime(file) != mtime[file]:\n raise RuntimeError(\"Failed to cleanup archived data: %s has been modified.\" % file)\n os.rename(file, os.path.join(backup_dir, file))\n self.log.debug(\"Moved %s to %s\" % (file, os.path.join(backup_dir, file)))\n return",
"def obter_pastas_inexistentes_no_destino(self):\n \n #laço em todas as subpasta da pasta de origem\n for pasta in self.sub_pastas_nao_finalizadas:\n \n #forma um path da pasta de destino com a subpasta da vez\n pasta_destino = os.path.join(self.pasta_destino.obter_caminho(),\\\n os.path.basename(pasta))\n \n #verifica se a pasta de destino já existe, senão existir, inclui\n #na lista da operações para criação futura.\n if not os.path.exists(pasta_destino):\n self.__class__.operacoes_criar_pastas.append(pasta_destino)",
"def test_calendarsUpgradeWithUIDs(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Plain XML\n freeBusyAttr: \"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\",\n },\n },\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"@xattrs\":\n {\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_history_import_relpath_in_archive():\n dest_parent = mkdtemp()\n with HistoryArchive(arcname_prefix='../insecure') as history_archive:\n\n history_archive.write_metafiles()\n history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')\n history_archive.finalize()\n _run_unpack(history_archive, dest_parent, 'Relative parent path in import archive allowed')",
"def test_sys_album_add_file(self):\n with ts.SetupDbAndCredentials() as s:\n args = [\"--start-date\", \"2017-09-19\", \"--end-date\", \"2017-09-20\"]\n s.test_setup(\n \"test_sys_album_add_file\", args=args, trash_db=True, trash_files=True\n )\n s.gp.start(s.parsed_args)\n\n pat = str(albums_root / \"2017\" / \"0923 Clones😀\" / \"*.*\")\n files = sorted(s.root.glob(pat))\n self.assertEqual(4, len(files))\n\n # spoof the album to pretend it only got 3 files up to 2017-09-20\n db = LocalData(s.root)\n db.cur.execute(\n \"UPDATE Albums SET EndDate='2017-09-20',\"\n \"Size=3 WHERE \"\n \"AlbumName='Clones😀'\"\n )\n db.store()\n\n args = [\n \"--start-date\",\n \"2017-09-19\",\n \"--end-date\",\n \"2017-09-23\",\n \"--index-only\",\n ]\n s.__exit__()\n s.test_setup(\"test_sys_album_add_file\", args=args)\n s.gp.start(s.parsed_args)\n\n # the rescan will reset the date so set it back\n db = LocalData(s.root)\n db.cur.execute(\n \"UPDATE Albums SET EndDate='2017-09-20' \" \"WHERE AlbumName='Clones😀'\"\n )\n db.store()\n\n args = [\"--skip-index\", \"--skip-files\"]\n s.__exit__()\n s.test_setup(\"test_sys_album_add_file\", args=args)\n s.gp.start(s.parsed_args)\n\n pat = str(albums_root / \"2017\" / \"0920 Clones😀\" / \"*.*\")\n files = sorted(s.root.glob(pat))\n self.assertEqual(4, len(files))\n should_be_gone = s.root / albums_root / \"2017\" / \"0923 Clones😀\"\n self.assertFalse(should_be_gone.exists())\n\n # test --album-date-by-first-photo\n\n # force re-download of the album\n db.cur.execute(\n \"UPDATE Albums SET Downloaded=0 \" \"WHERE AlbumName='Clones😀'\"\n )\n db.store()\n args = [\"--skip-index\", \"--skip-files\", \"--album-date-by-first-photo\"]\n s.__exit__()\n s.test_setup(\"test_sys_album_add_file\", args=args)\n s.gp.start(s.parsed_args)\n\n pat = str(albums_root / \"2017\" / \"0919 Clones😀\" / \"*.*\")\n files = sorted(s.root.glob(pat))\n self.assertEqual(4, len(files))\n\n should_be_gone = s.root / albums_root.absolute() / \"2017\" / \"0920 Clones😀\"\n self.assertFalse(should_be_gone.exists())",
"def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def check_missing_unique_link():\n from mspray.apps.main.utils import queryset_iterator\n\n queryset = SprayDay.objects.filter(spraypoint__isnull=True).only(\n \"pk\", \"location_id\"\n )\n for record in queryset_iterator(queryset):\n add_unique_record(record.pk, record.location_id)\n gc.collect()"
]
| [
"0.7058937",
"0.64720345",
"0.590238",
"0.5872786",
"0.5592633",
"0.5588349",
"0.5517519",
"0.5494793",
"0.53246284",
"0.5299591",
"0.5261831",
"0.52328426",
"0.52309614",
"0.5229098",
"0.51864475",
"0.5152008",
"0.5129799",
"0.5129616",
"0.509695",
"0.5096544",
"0.5079429",
"0.50733143",
"0.5068733",
"0.50491923",
"0.5014291",
"0.5007934",
"0.49841923",
"0.49750224",
"0.49602744",
"0.4958215"
]
| 0.68933755 | 1 |
Unknown files, including .DS_Store files at any point in the hierarchy, as well as non-directory files in a user's calendar home, will be ignored and not interrupt an upgrade. | def test_calendarsUpgradeWithUnknownFiles(self):
ignoredUIDContents = {
"64": {
"23": {
"6423F94A-6B76-4A3A-815B-D52CFD77935D": {
"calendar": {
db_basename: {
"@contents": "",
},
},
"garbage.ics": {
"@contents": "Oops, not actually an ICS file.",
},
"other-file.txt": {
"@contents": "Also not a calendar collection."
},
}
}
},
".DS_Store": {
"@contents": "",
}
}
before = {
".DS_Store":
{
"@contents": "",
},
"calendars":
{
".DS_Store":
{
"@contents": "",
},
"__uids__": ignoredUIDContents,
},
"principals":
{
".DS_Store":
{
"@contents": "",
},
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
".DS_Store":
{
"@contents": "",
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
".DS_Store":
{
"@contents": "",
},
"__uids__": ignoredUIDContents,
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CheckForUnknownFiles(self):\n unknown_files = self.GetUnknownFiles()\n if unknown_files:\n print \"The following files are not added to version control:\"\n for line in unknown_files:\n print line\n prompt = \"Are you sure to continue?(y/N) \"\n answer = raw_input(prompt).strip()\n if answer != \"y\":\n ErrorExit(\"User aborted\")",
"def CheckForUnknownFiles(self):\r\n unknown_files = self.GetUnknownFiles()\r\n if unknown_files:\r\n print \"The following files are not added to version control:\"\r\n for line in unknown_files:\r\n print line\r\n prompt = \"Are you sure to continue?(y/N) \"\r\n answer = raw_input(prompt).strip()\r\n if answer != \"y\":\r\n ErrorExit(\"User aborted\")",
"def GetUnknownFiles(self):\r\n raise NotImplementedError(\r\n \"abstract method -- subclass %s must override\" % self.__class__)",
"def GetUnknownFiles(self):\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)",
"def ignore_from_repo(self, directory, ignore):\n for filename in os.listdir(directory):\n if not filename.endswith('.rpm'):\n continue\n _, basename = filename.split('-', 1)\n ignore.add(basename[:-4])",
"def dir_noaccess(self,fullname):\n pass",
"def test_localstatedir(self):\n self.chck_triple('localstatedir')",
"def process_IN_ISDIR(self, event):",
"def set_unknown_paths(self, path):\n\n for wadfile in self.files:\n if not wadfile.path:\n if wadfile.ext:\n wadfile.path = f\"{path}/{wadfile.path_hash:016x}.{wadfile.ext}\"\n else:\n wadfile.path = f\"{path}/{wadfile.path_hash:016x}\"",
"def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')",
"def clean():\n for root, dirs, files in os.walk('.'):\n for item in dirs:\n if (item[0]!='.'):\n try:\n os.remove(os.path.join(item,'.DS_Store'))\n except:\n pass",
"def check_for_missing_files(self, path):\n return None",
"def GetUnknownFiles(self):\r\n args = []\r\n status = RunShell([\"hg\", \"status\", \"--rev\", self.base_rev, \"-u\", \".\"],\r\n silent_ok=True)\r\n unknown_files = []\r\n for line in status.splitlines():\r\n st, fn = line.split(\" \", 1)\r\n if st == \"?\":\r\n unknown_files.append(fn)\r\n return unknown_files",
"def GetUnknownFiles(self):\n args = []\n status = RunShell([\"hg\", \"status\", \"--rev\", self.base_rev, \"-u\", \".\"],\n silent_ok=True)\n unknown_files = []\n for line in status.splitlines():\n st, fn = line.split(\" \", 1)\n if st == \"?\":\n unknown_files.append(fn)\n return unknown_files",
"def standardize_excl(self) -> None:\n # attempt to protect against augeas error in 0.10.0 - ubuntu\n # *.augsave -> /*.augsave upon augeas.load()\n # Try to avoid bad httpd files\n # There has to be a better way... but after a day and a half of testing\n # I had no luck\n # This is a hack... work around... submit to augeas if still not fixed\n\n excl = [\"*.augnew\", \"*.augsave\", \"*.dpkg-dist\", \"*.dpkg-bak\",\n \"*.dpkg-new\", \"*.dpkg-old\", \"*.rpmsave\", \"*.rpmnew\",\n \"*~\",\n self.root + \"/*.augsave\",\n self.root + \"/*~\",\n self.root + \"/*/*augsave\",\n self.root + \"/*/*~\",\n self.root + \"/*/*/*.augsave\",\n self.root + \"/*/*/*~\"]\n\n for i, excluded in enumerate(excl, 1):\n self.aug.set(\"/augeas/load/Httpd/excl[%d]\" % i, excluded)\n\n self.aug.load()",
"def edMusFiles(rootDir):\n\n #Check for zip files directory first, then go through music files\n if os.path.exists(os.path.join(rootDir, 'MUSzipFiles')) == False:\n print(\"MUSzipFiles does not exist, trying to make...\")\n os.makedirs(os.path.join(rootDir, 'MUSzipFiles'))\n\n for root, dirs, files in os.walk(rootDir, topdown=False):\n \n for name in files:\n \n if \"desktop.ini\" in name:\n os.remove(os.path.join(root, name))\n print(\"dektop.ini removed!\") \n \n elif \"Folder.jpg\" in name:\n os.remove(os.path.join(root, name))\n print(\"Folder.jpg removed!\")\n \n elif \"_Small.jpg\" in name:\n os.remove(os.path.join(root, name))\n print(\"_Small.jpg removed!\")\n\n elif \"AlbumArtSmall.jpg\" in name:\n os.remove(os.path.join(root, name))\n print(\"AlbumArtSmall.jpg removed!\")\n \n elif \"README\" in name:\n os.remove(os.path.join(root, name))\n print(\"README removed!\")\n \n #Set so that if the file is already in zip file folder, leave it alone.\n elif \".zip\" in name:\n if (os.path.isfile(os.path.join(rootDir, 'MUSzipFiles', name)) == False): \n os.rename(os.path.join(root, name), os.path.join(rootDir, 'MUSzipFiles', name))\n print(\"Moved {0} to MUSzipFiles!\".format(name)) \n\n elif '.7z' in name:\n if (os.path.isfile(os.path.join(rootDir, 'MUSzipFiles', name)) == False):\n os.rename(os.path.join(root, name), os.path.join(rootDir, 'MUSzipFiles', name))\n print(\"Moved {0} to MUSzipFiles!\".format(name))",
"def invalidate_for_files(self):\n return []",
"def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')",
"def invalidate_for_files(self):\r\n return []",
"def dir_unchecked():\n return abspath('unchecked')",
"def _EnsureDirsSafeForUpdate(self, dirs):\n for d in dirs:\n if not d:\n d = 'null'\n if d.lstrip(os.sep).lower() in self.unsafe_update_dirs:\n raise CommandException('EnsureDirsSafeForUpdate: encountered unsafe '\n 'directory (%s); aborting update' % d)",
"def test_fallback_path_is_not_file_or_dir(self):\n context = Context(SSLv23_METHOD)\n context._fallback_default_verify_paths([], [])\n context._fallback_default_verify_paths([\"/not/a/file\"], [\"/not/a/dir\"])",
"def ignore(func):\n return lambda path : None if path.lower().endswith(\n # add whatever makes sense to the list\n ('.git', \n '__pycache__',\n '.md', '.txt', 'license', '.png', '.jpg', '.jpeg')\n ) else func(path)",
"def test_ignores(self, tmpdir):\n from pytest_flake8 import Ignorer\n ignores = [\"E203\", \"b/?.py E204 W205\", \"z.py ALL\", \"*.py E300\"]\n ign = Ignorer(ignores)\n assert ign(tmpdir.join(\"a/b/x.py\")) == \"E203 E204 W205 E300\".split()\n assert ign(tmpdir.join(\"a/y.py\")) == \"E203 E300\".split()\n assert ign(tmpdir.join(\"a/z.py\")) is None",
"def unknown(self):\n self.add_file_string('Unknown file')\n self.should_copy = False",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def test_noTimedEventsExtraction(self):\n userbase.extractUserStore(self.account, self.destdir)\n self.assertEqual(\n self.ls.accountByAddress(self.localpart, self.domain),\n None)\n\n self.assertFalse(list(self.store.query(SubStore, SubStore.storepath == self.origdir)))\n self.origdir.restat(False)\n self.assertFalse(self.origdir.exists())\n self.assertFalse(list(self.store.query(_SubSchedulerParentHook)))",
"def RemoveJumpListFiles():\n if chromium_utils.IsWindows():\n custom_destination_path = os.path.join(os.environ['USERPROFILE'],\n 'AppData',\n 'Roaming',\n 'Microsoft',\n 'Windows',\n 'Recent',\n 'CustomDestinations')\n LogAndRemoveFiles(custom_destination_path, '.+')",
"def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()",
"def test_calendarsUpgradeWithOrphans(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))"
]
| [
"0.61615694",
"0.61526537",
"0.590852",
"0.5865593",
"0.58169883",
"0.5709954",
"0.5677684",
"0.56375474",
"0.5619872",
"0.5615769",
"0.5602505",
"0.5568837",
"0.5514606",
"0.55021447",
"0.5469052",
"0.5466647",
"0.546537",
"0.54529905",
"0.54308003",
"0.54149336",
"0.54028714",
"0.5356639",
"0.5350898",
"0.53353137",
"0.5300782",
"0.52898335",
"0.528744",
"0.5281623",
"0.52727",
"0.52600837"
]
| 0.68915445 | 0 |
Verify that calendar homes in the /calendars/__uids__/<guid>/ form are upgraded to /calendars/__uids__/XX/YY/<guid>/ form | def test_calendarsUpgradeWithUIDs(self):
before = {
"calendars":
{
"__uids__":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_before,
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
# Plain XML
freeBusyAttr: "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n",
},
},
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
},
"@xattrs":
{
cTagAttr: isValidCTag, # method below
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>"),
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_calendarsUpgradeWithDuplicateOrphans(self):\n\n before = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n \"unknownuser.1\":\n {\n },\n \"unknowngroup.1\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithOrphans(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithUnknownFiles(self):\n\n ignoredUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"garbage.ics\": {\n \"@contents\": \"Oops, not actually an ICS file.\",\n },\n \"other-file.txt\": {\n \"@contents\": \"Also not a calendar collection.\"\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": ignoredUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": ignoredUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithError(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n root = self.createHierarchy(before)\n\n config.DocumentRoot = root\n config.DataRoot = root\n\n (yield self.doUpgrade(config))\n\n self.assertTrue(self.verifyHierarchy(root, after))",
"def test_calendarsUpgradeWithUIDsMultilevel(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n \"@xattrs\":\n {\n md5Attr: \"12345\",\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: \"12345\",\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithNestedCollections(self):\n\n beforeUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n afterUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \".collection.nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": beforeUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": afterUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithNoChange(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\\r\\n\"),\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def test_calendarsUpgradeWithTypes(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"wsanchez\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n \"@xattrs\":\n {\n md5Attr: \"12345\",\n },\n },\n \"@xattrs\":\n {\n cTagAttr: \"12345\",\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Pickled XML Doc\n freeBusyAttr: cPickle.dumps(WebDAVDocument.fromString(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\").root_element),\n },\n },\n },\n },\n \"groups\":\n {\n \"managers\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n \"9F\":\n {\n \"F6\":\n {\n \"9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def test_calendarsUpgradeWithInboxItems(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"oldinboxitem\": {\n \"@contents\": \"\",\n \"@timestamp\": 1, # really old file\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"inboxitems.txt\":\n {\n \"@contents\": None, # ignore contents, the paths inside are random test directory paths\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n \"newinboxitem\": {\n \"@contents\": \"\",\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def test_admin_calendar_admin_list(self):\n response = self.client.get(\"/admin/appointment/calendar/\")\n self.assertEqual(response.status_code, 200)",
"def test_normalizeCUAddrs(self):\n\n class StubRecord(object):\n\n def __init__(self, fullNames, uid, cuas):\n self.fullNames = fullNames\n self.uid = uid\n self.calendarUserAddresses = cuas\n\n def getCUType(self):\n return \"INDIVIDUAL\"\n\n @property\n def displayName(self):\n return self.fullNames[0]\n\n class StubDirectory(object):\n\n def __init__(self):\n self.count = 0\n\n def recordWithCalendarUserAddress(self, cuaddr):\n self.count += 1\n record = records.get(cuaddr, None)\n if record is not None:\n return succeed(record)\n else:\n raise Exception\n\n records = {\n \"mailto:[email protected]\":\n StubRecord((\"User A\",), u\"123\", (\"mailto:[email protected]\", \"urn:x-uid:123\")),\n \"mailto:[email protected]\":\n StubRecord((\"User B\",), u\"234\", (\"mailto:[email protected]\", \"urn:x-uid:234\")),\n \"/principals/users/a\":\n StubRecord((\"User A\",), u\"123\", (\"mailto:[email protected]\", \"urn:x-uid:123\")),\n \"/principals/users/b\":\n StubRecord((\"User B\",), u\"234\", (\"mailto:[email protected]\", \"urn:x-uid:234\")),\n }\n\n directory = StubDirectory()\n cuaCache = {}\n yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)\n yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)\n\n # Ensure we only called principalForCalendarUserAddress 3 times. It\n # would have been 8 times without the cuaCache.\n self.assertEquals(directory.count, 3)",
"def test_directory_path_with_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=True)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (with-calendar)\",\n )\n assert directory_path == exist_directory_path",
"def test_objectresource_resourcenameforuid(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n name = yield calendar.resourceNameForUID(\"uid1\")\n self.assertEqual(name, \"1.ics\")\n\n name = yield calendar.resourceNameForUID(\"uid2\")\n self.assertEqual(name, \"2.ics\")\n\n name = yield calendar.resourceNameForUID(\"foo\")\n self.assertEqual(name, None)\n\n yield self.commitTransaction(1)",
"def _testOneResource(self, home, calendar_name, data):\n inbox = yield self.calendarUnderTest(home=home, name=calendar_name)\n objs = yield inbox.objectResources()\n self.assertEqual(len(objs), 1)\n\n caldata = yield objs[0].componentForUser()\n self.assertEqualCalendarData(caldata, data.format(**self.dtsubs))",
"def test_objectresource_setcomponent(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n resource = yield calendar.objectResourceWithName(\"1.ics\")\n changed = yield resource.setComponent(Component.fromString(self.caldata1_changed))\n self.assertFalse(changed)\n caldata = yield resource.component()\n self.assertEqual(normalize_iCalStr(str(caldata)), normalize_iCalStr(self.caldata1_changed))\n yield self.commitTransaction(1)\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n resource01 = yield calendar01.objectResourceWithName(\"1.ics\")\n caldata = yield resource01.component()\n self.assertEqual(normalize_iCalStr(str(caldata)), normalize_iCalStr(self.caldata1_changed))\n yield self.commitTransaction(0)\n\n # Fail to set with different UID\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n resource = yield calendar.objectResourceWithName(\"1.ics\")\n self.assertFailure(\n resource.setComponent(Component.fromString(self.caldata1_failed)),\n InvalidUIDError,\n )\n yield self.commitTransaction(1)",
"def test_calendar_view_update(self):\n request = self.factory.post('/module/calendar/1/', {\n \"caller_name\": \"test\",\n \"survey\": \"1\",\n }, follow=True)\n request.user = self.user\n request.session = {}\n response = calendar_change(request, 1)\n self.assertEqual(response.status_code, 200)\n\n request = self.factory.post('/module/calendar/1/', {'delete': True}, follow=True)\n request.user = self.user\n request.session = {}\n response = calendar_change(request, 1)\n self.assertEqual(response.status_code, 302)",
"async def test_assign_unique_id_failure(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n mock_calendars_list: ApiResult,\n test_api_calendar: dict[str, Any],\n config_entry: MockConfigEntry,\n mock_events_list: ApiResult,\n mock_calendar_get: Callable[[...], None],\n request_status: http.HTTPStatus,\n config_entry_status: ConfigEntryState,\n) -> None:\n\n assert config_entry.state is ConfigEntryState.NOT_LOADED\n assert config_entry.unique_id is None\n\n mock_calendar_get(\n \"primary\",\n {},\n status=request_status,\n )\n\n mock_calendars_list({\"items\": [test_api_calendar]})\n mock_events_list({})\n await component_setup()\n\n assert config_entry.state is config_entry_status\n assert config_entry.unique_id is None",
"async def test_calendar_yaml_error(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n mock_calendars_list: ApiResult,\n test_api_calendar: dict[str, Any],\n mock_events_list: ApiResult,\n) -> None:\n mock_calendars_list({\"items\": [test_api_calendar]})\n mock_events_list({})\n\n with patch(\"homeassistant.components.google.open\", side_effect=FileNotFoundError()):\n assert await component_setup()\n\n assert not hass.states.get(TEST_YAML_ENTITY)\n assert hass.states.get(TEST_API_ENTITY)",
"def test_objectresource_resourceuidforname(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n\n uid = yield calendar.resourceUIDForName(\"1.ics\")\n self.assertEqual(uid, \"uid1\")\n\n uid = yield calendar.resourceUIDForName(\"2.ics\")\n self.assertEqual(uid, \"uid2\")\n\n uid = yield calendar.resourceUIDForName(\"foo.ics\")\n self.assertEqual(uid, None)\n\n yield self.commitTransaction(1)",
"def test_homepage_previous(self):\n create_user()\n login(self.app, 'me1', 'password')\n create_equipment()\n create_events()\n\n response1 = self.app.get('/', follow_redirects=True)\n self.assertEqual(response1.status_code, 200)\n response_text = response1.get_data(as_text=True)\n today = datetime.now().strftime('%Y-%m-%d')\n prev_month_range = calendar.monthrange(int(today[0:4]), int(today[5:7]) - 1)\n date_current = date(int(today[0:4]), int(today[5:7]), int(today[8:10]))\n self.assertIn(str(date_current.month) + \"/\" + str(date_current.day), response_text)\n response2 = self.app.get('/get_calendar/' + str(date(int(today[0:4]), int(today[5:7]), int(today[8:10]) - 1 if int(today[8:10]) > 1 else prev_month_range[1])))\n response_text = response2.get_data(as_text=True)\n self.assertIn(str(date_current.month) + \"/\" + str(date_current.day - 1), response_text)\n self.assertEqual(response2.status_code, 200)\n self.assertIn('Calendar', response_text)\n self.assertIn('Logout', response_text)\n self.assertIn('eq1', response_text)\n self.assertIn('New equipment', response_text)\n self.assertIn('Next Day', response_text)\n self.assertIn('Previous Day', response_text)\n\n self.assertNotIn('Example1', response_text)\n self.assertNotIn('Example2', response_text)",
"def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)",
"async def test_init_calendar(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n mock_calendars_list: ApiResult,\n test_api_calendar: dict[str, Any],\n mock_events_list: ApiResult,\n) -> None:\n\n mock_calendars_list({\"items\": [test_api_calendar]})\n mock_events_list({})\n assert await component_setup()\n\n state = hass.states.get(TEST_API_ENTITY)\n assert state\n assert state.name == TEST_API_ENTITY_NAME\n assert state.state == STATE_OFF\n\n # No yaml config loaded that overwrites the entity name\n assert not hass.states.get(TEST_YAML_ENTITY)",
"def test_updateEvent(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n eventprev = dict(start = '2015-08-21T01:23:00.000Z',\n end = '2015-08-21T01:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n eventcurr = dict(start = '2015-08-21T02:23:00.000Z',\n end = '2015-08-21T02:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n eventnext = dict(start = '2015-08-21T03:23:00.000Z',\n end = '2015-08-21T03:25:00.000Z',\n date = '2015-08-21T00:00:00.000Z')\n i=0\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + eventprev['start'] + eventprev['end'])\n uidcurr = str('alex_' + eventcurr['start'] + eventcurr['end'])\n uidnext = str('alex_' + eventnext['start'] + eventnext['end'])\n invuid = '00000000000000000000000'\n\n rv = self.json_post('/createEvent/alex', eventprev)\n assert uid in str(rv.data)\n\n rv = self.json_post('/createEvent/alex', eventnext)\n assert uidnext in str(rv.data)\n\n rv = self.json_post('/createEvent/alex', eventcurr)\n assert uidcurr in str(rv.data)\n\n rv = self.json_post('/updateEvent/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/bbbb', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/bbbb', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Invalid username' in str(rv.data)\n\n # Set prev event\n rv = self.json_get('/getSuggestions/alex', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n placeId = json.loads(rv.data)['business'][1]['id']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uid,\n 'choice': '1',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n rv = self.json_post('/ratePlace/alex', {'uid': placeId,\n 'rating': 5})\n assert 'ratings' in str(rv.data)\n\n rv = self.json_post('/ratePlace/alex', {'uid': placeId,\n 'rating': 4})\n assert 'ratings' in str(rv.data)\n\n # Reset prev event\n rv = self.json_get('/getSuggestions/alex', {'uid': uid,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uid,\n 'choice': '0',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n # Set next event\n rv = self.json_get('/getSuggestions/alex', {'uid': uidnext,\n 'query': 'Homewood Campus, Baltimore'})\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uidnext,\n 'choice': '2',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n # Set curr event\n rv = self.json_get('/getSuggestions/alex', {'uid': uidcurr,\n 'query': 'Towson, MD'})\n print(rv.data)\n sugId = json.loads(rv.data)['uid']\n assert 'business' in str(rv.data)\n\n rv = self.json_post('/updateEvent/alex', {'uid': uidnext,\n 'choice': '0',\n 'suggestionId': sugId})\n assert 'yelpId' in str(rv.data)\n\n rv = self.json_get('/getEventFromId/alex', {'uid': invuid})\n assert 'Event not found' in str(rv.data)\n\n rv = self.json_get('/getSuggestions/alex', {'uid': invuid,\n 'query': 'Homewood Campus, Baltimore'})\n assert 'Event not found' in str(rv.data)",
"def test_calendar_user_view_update(self):\n request = self.factory.post('/module/calendar_user/4/', {\n \"caller_name\": \"test\",\n \"survey\": \"1\",\n }, follow=True)\n request.user = self.user\n request.session = {}\n #response = calendar_user_change(request, 3)\n #self.assertEqual(response.status_code, 200)\n\n request = self.factory.post('/module/calendar_user/3/', {'delete': True}, follow=True)\n request.user = self.user\n request.session = {}\n #response = calendar_user_change(request, 3)\n #self.assertEqual(response.status_code, 302)",
"def test_homepage_next(self):\n create_user()\n login(self.app, 'me1', 'password')\n create_equipment()\n create_events()\n\n response1 = self.app.get('/', follow_redirects=True)\n self.assertEqual(response1.status_code, 200)\n response_text = response1.get_data(as_text=True)\n today = datetime.now().strftime('%Y-%m-%d')\n month_range = calendar.monthrange(int(today[0:4]), int(today[5:7]))\n date_current = date(int(today[0:4]), int(today[5:7]), int(today[8:10]))\n self.assertIn(str(date_current.month) + \"/\" + str(date_current.day), response_text)\n response2 = self.app.get('/get_calendar/' + str(date(int(today[0:4]), int(today[5:7]), int(today[8:10]) + 1 if int(today[8:10]) < month_range[1] else 1)))\n response_text = response2.get_data(as_text=True)\n self.assertIn(str(date_current.month) + \"/\" + str(date_current.day + 1), response_text)\n self.assertEqual(response2.status_code, 200)\n self.assertIn('Calendar', response_text)\n self.assertIn('Logout', response_text)\n self.assertIn('eq1', response_text)\n self.assertIn('New equipment', response_text)\n self.assertIn('Next Day', response_text)\n self.assertIn('Previous Day', response_text)\n\n self.assertNotIn('Example1', response_text)\n self.assertNotIn('Example2', response_text)",
"def test_getExistDates(self):\n cases = [\n (self.test_eac + \"NE01201.xml\",\"1858-01-01T00:00:00Z\",\"1935-08-21T00:00:00Z\"),\n (self.test_eac + \"NE00300.xml\",\"1960-01-01T00:00:00Z\",\"1977-12-31T00:00:00Z\"),\n (self.test_eac + \"NE01500.xml\",\"1981-01-01T00:00:00Z\",\"1981-12-31T00:00:00Z\")\n ]\n for case in cases:\n source, expected_from_date, expected_to_date = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(doc, None)\n fromDate, toDate = doc.getExistDates()\n self.assertEqual(fromDate, expected_from_date)\n self.assertEqual(toDate, expected_to_date)",
"def test_team_template_folders_id_patch(self):\n pass",
"async def test_assign_unique_id(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n mock_calendars_list: ApiResult,\n test_api_calendar: dict[str, Any],\n mock_events_list: ApiResult,\n mock_calendar_get: Callable[[...], None],\n config_entry: MockConfigEntry,\n) -> None:\n\n assert config_entry.state is ConfigEntryState.NOT_LOADED\n assert config_entry.unique_id is None\n\n mock_calendar_get(\n \"primary\",\n {\"id\": EMAIL_ADDRESS, \"summary\": \"Personal\", \"accessRole\": \"owner\"},\n )\n\n mock_calendars_list({\"items\": [test_api_calendar]})\n mock_events_list({})\n assert await component_setup()\n\n assert config_entry.state is ConfigEntryState.LOADED\n assert config_entry.unique_id == EMAIL_ADDRESS",
"def test_removeNotificationDirectories(self):\n\n before = {\n \"calendars\": {\n \"users\": {\n \"wsanchez\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"notifications\": {\n \"sample-notification.xml\": {\n \"@contents\": \"<?xml version='1.0'>\\n<should-be-ignored />\"\n }\n }\n }\n }\n }\n }\n\n after = {\n \"calendars\": {\n \"__uids__\": {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n }\n }\n }\n }\n },\n \".calendarserver_version\": {\n \"@contents\": \"2\",\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after))",
"def test_directory_path_without_calendar():\n downloader = WallpaperDownloader(\"08-2020\")\n directory_path = downloader._get_directory_path(with_calendar=False)\n exist_directory_path = os.path.join(\n downloader.destination_directory_path,\n \"august-2020 (without-calendar)\",\n )\n assert directory_path == exist_directory_path"
]
| [
"0.6521772",
"0.65184355",
"0.644795",
"0.6287781",
"0.6244558",
"0.62066233",
"0.6160223",
"0.5841681",
"0.5369036",
"0.5337584",
"0.5312082",
"0.525256",
"0.5214574",
"0.513125",
"0.5101096",
"0.5082935",
"0.506659",
"0.50127804",
"0.50119203",
"0.49426952",
"0.49366602",
"0.49221918",
"0.49074185",
"0.48802882",
"0.4856312",
"0.48392096",
"0.48193255",
"0.48004487",
"0.48001823",
"0.47941658"
]
| 0.6769738 | 0 |
Ensure that calendar user addresses (CUAs) are cached so we can reduce the number of principal lookup calls during upgrade. | def test_normalizeCUAddrs(self):
class StubRecord(object):
def __init__(self, fullNames, uid, cuas):
self.fullNames = fullNames
self.uid = uid
self.calendarUserAddresses = cuas
def getCUType(self):
return "INDIVIDUAL"
@property
def displayName(self):
return self.fullNames[0]
class StubDirectory(object):
def __init__(self):
self.count = 0
def recordWithCalendarUserAddress(self, cuaddr):
self.count += 1
record = records.get(cuaddr, None)
if record is not None:
return succeed(record)
else:
raise Exception
records = {
"mailto:[email protected]":
StubRecord(("User A",), u"123", ("mailto:[email protected]", "urn:x-uid:123")),
"mailto:[email protected]":
StubRecord(("User B",), u"234", ("mailto:[email protected]", "urn:x-uid:234")),
"/principals/users/a":
StubRecord(("User A",), u"123", ("mailto:[email protected]", "urn:x-uid:123")),
"/principals/users/b":
StubRecord(("User B",), u"234", ("mailto:[email protected]", "urn:x-uid:234")),
}
directory = StubDirectory()
cuaCache = {}
yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)
yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)
# Ensure we only called principalForCalendarUserAddress 3 times. It
# would have been 8 times without the cuaCache.
self.assertEquals(directory.count, 3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _reload_acls(self):\n\t\tself.acls = ACLs()",
"def figure_out_real_accounts(people_involved, people_caches):\n # Using '+' as a filter removes a fair number of WATCHLISTS entries.\n people_involved = set(\n i for i in people_involved\n if ('+' not in i and\n not i.startswith('commit-bot') and\n not i.endswith('gserviceaccount.com')))\n people_involved -= people_caches['fake']\n\n # People we are still unsure about that need to be looked up.\n people_to_look_for = list(people_involved - people_caches['real'])\n\n futures = [\n models.Issue.query(models.Issue.owner == users.User(r)).fetch(\n limit=1, keys_only=True)\n for r in people_to_look_for\n ]\n for i, future in enumerate(futures):\n account_email = people_to_look_for[i]\n if not list(future):\n people_caches['fake'].add(account_email)\n people_involved.remove(account_email)\n else:\n people_caches['real'].add(account_email)\n return people_involved",
"def fix_discovery(self):\r\n for item in self.collections['response']['collections']:\r\n if item['id'] == 'LDSO':\r\n try:\r\n self.collections['LDSO'] = self.get(['links']['self']['href'])\r\n except:\r\n self.lds_user = False\r\n else:\r\n self.lds_user = True\r\n else:\r\n self.collections[item['id']] = item['links']['self']['href']\r\n try:\r\n self.user = self.get_current_user()['response']['users'][0]\r\n except:\r\n self.user = \"\"",
"def __fresh_account(self):\n\t\tshuffle(self.wallet)\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_spent(acct[\"address\"], self.testnet))==0 and len(get_unspent(acct[\"address\"], self.testnet))==0:\n\t\t\t\treturn acct\n\t\traise ValueError(\"No unused addresses available. Run add_accounts()\")",
"async def fix_cache(self, ctx):\n self.initial_config(ctx.message.server.id)\n self.settings[server.id]['usercache'] = []\n self.save_json()",
"def test_lookup_account(self):\n pass",
"def effective_principals(self, principal_id, request=None, context=None):",
"def cleanup(self):\n all_aps_info = self.zd.get_all_ap_info()\n all_aps_ins = self.testbed.components['AP']\n for ap_ins in all_aps_ins:\n for ap_info in all_aps_info:\n if ap_ins.base_mac_addr.upper() == ap_info.get('mac').upper() and ap_info.get('ip_addr') != '':\n ap_ins.ip_addr = ap_info.get('ip_addr')",
"def populate_datacenter_cache(self):\n self._datacenter_cache = {}\n dcs = Datacenter.search()\n for dc in dcs:\n self._datacenter_cache[dc.api_id] = dc",
"def refresh_cache(slack_client):\n user_cache = {}\n userlist = slack_client.api_call('users.list')\n if 'members' not in userlist:\n logging.warning(\"Couldn't get a user cache\")\n return user_cache, time.time()\n \n for member in userlist['members']:\n uid = member['id']\n name = member['name']\n try:\n profile_name = member['profile']['first_name']\n except KeyError:\n profile_name = member['profile']['real_name']\n if profile_name:\n user_cache[uid] = profile_name\n else:\n user_cache[uid] = name\n return user_cache, time.time()",
"def remove_ca_certs_from_systemwide_ca_store(self):\n\n raise NotImplementedError()",
"def test_addresses_list_for_user_zero(self):\n\n user = self.client.users.create({})\n\n with self.assertRaises(MarqetaError):\n self.client.funding_sources.addresses.list_for_user(user.token)",
"def test_patch_user_identity_mapping(self):\n pass",
"def reload_systemwide_ca_store(self):\n\n raise NotImplementedError()",
"def test_user_cache(self):\n original_token = TestExpirableToken(user=self.user)\n token = TestExpirableToken.from_key(original_token.key)\n\n def test_init_cache():\n user = original_token.user\n\n def test_user_cache():\n user = token.user\n\n self.assertNumQueries(0, test_init_cache)\n self.assertNumQueries(0, test_user_cache)",
"def test_replace_user_identity_mapping(self):\n pass",
"def clearAuth(self):\n for (when, hostmask) in self.auth:\n users.invalidateCache(hostmask=hostmask)\n self.auth = []",
"def test_reset_ca_list(self):\n cacert = load_certificate(FILETYPE_PEM, root_cert_pem)\n secert = load_certificate(FILETYPE_PEM, server_cert_pem)\n clcert = load_certificate(FILETYPE_PEM, server_cert_pem)\n\n cadesc = cacert.get_subject()\n sedesc = secert.get_subject()\n cldesc = clcert.get_subject()\n\n def changed_ca(ctx):\n ctx.set_client_ca_list([sedesc, cldesc])\n ctx.set_client_ca_list([cadesc])\n return [cadesc]\n\n self._check_client_ca_list(changed_ca)",
"def refresh_cached_account(username, registry):\n settings = registry.settings\n cache_ttl = int(settings.get(\"account_cache_ttl_seconds\", 30))\n cache_key = get_account_cache_key(username, registry)\n cache = registry.cache\n cache_result = cache.expire(cache_key, cache_ttl)\n return cache_result",
"def cleanPMUserCache(cache):\n\n removeUser = []\n now = int(time.time())\n\n for user, utime in cache.items():\n if now > utime:\n log.debug(\"removing author %s from recent list\", user)\n removeUser.append(user)\n\n for ku in removeUser:\n del cache[ku]",
"def _real_account(self, account_name, entries, begin_date=None,\n end_date=None, min_accounts=None):\n if begin_date:\n entries = list(iter_entry_dates(entries, begin_date, end_date))\n if not min_accounts:\n min_accounts = [account_name]\n\n return realization.get(realization.realize(entries, min_accounts),\n account_name)",
"def certificate_in_use_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n acm = session.client(\"acm\")\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for carn in list_certificates(cache, session):\n # Get ACM Cert Details\n cert = acm.describe_certificate(CertificateArn=carn)[\"Certificate\"]\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(cert,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n cDomainName = str(cert['DomainName'])\n cIssuer = str(cert['Issuer'])\n cSerial = str(cert['Serial'])\n cStatus = str(cert['Status'])\n cKeyAlgo = str(cert['KeyAlgorithm'])\n useLen = len(cert[\"InUseBy\"])\n # this is a failing check\n if useLen == 0:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-in-use-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"LOW\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.2] ACM Certificates should be in use\",\n \"Description\": \"ACM Certificate \"\n + carn\n + \" is currently not in use, this can be indicative of an orphaned certificate or that the downstream workloads are no longer active (maliciously or not). Refer to the remediation instructions if this configuration is not intended\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on installing certifactes refer to the Services integrated with AWS Certificate Manager section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-2\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 
4 PM-5\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.1.1\",\n \"ISO 27001:2013 A.8.1.2\",\n \"ISO 27001:2013 A.12.5.1\",\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": carn + \"/acm-cert-in-use-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": carn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\"Software and Configuration Checks/AWS Security Best Practices\"],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[ACM.2] ACM Certificates should be in use\",\n \"Description\": \"ACM Certificate \"\n + carn\n + \" is in use.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on installing certifactes refer to the Services integrated with AWS Certificate Manager section of the AWS Certificate Manager User Guide.\",\n \"Url\": \"https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Security Services\",\n \"AssetService\": \"Amazon Certificate Manager\",\n \"AssetComponent\": \"Certificate\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCertificateManagerCertificate\",\n \"Id\": carn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCertificateManagerCertificate\": {\n \"DomainName\": cDomainName,\n \"Issuer\": cIssuer,\n \"Serial\": cSerial,\n \"KeyAlgorithm\": cKeyAlgo,\n \"Status\": cStatus\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 ID.AM-2\",\n \"NIST SP 800-53 Rev. 4 CM-8\",\n \"NIST SP 800-53 Rev. 4 PM-5\",\n \"AICPA TSC CC3.2\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.1.1\",\n \"ISO 27001:2013 A.8.1.2\",\n \"ISO 27001:2013 A.12.5.1\",\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding",
"def test_calendarsUpgradeWithUIDs(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Plain XML\n freeBusyAttr: \"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\",\n },\n },\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"@xattrs\":\n {\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))",
"def accounts(self): # pragma: no coverage\r\n raise NotImplementedError()",
"def _migrate_users(correct_course_key, role, lower_org):\r\n for user in orm['auth.user'].objects.filter(groups=group).all():\r\n entry = orm['student.courseaccessrole'](\r\n role=role, user=user,\r\n org=correct_course_key.org, course_id=correct_course_key\r\n )\r\n try:\r\n entry.save()\r\n except IntegrityError:\r\n # already stored\r\n pass\r\n orgs[lower_org] = correct_course_key.org",
"def check_legacy_credentials(user, email):\n legacy_credential = LegacyCredential.objects.filter(email=email,\n migrated=False)\n if legacy_credential:\n legacy_credential = legacy_credential.get()\n user.is_credentialed = True\n # All of them are mimic credentialed\n month, day, year = legacy_credential.mimic_approval_date.split('/')\n dt = datetime(int(year), int(month), int(day))\n dt = pytz.timezone(timezone.get_default_timezone_name()).localize(dt)\n user.credential_datetime = dt\n legacy_credential.migrated = True\n legacy_credential.migration_date = timezone.now()\n legacy_credential.migrated_user = user\n legacy_credential.save()\n user.save()",
"def test_set_empty_ca_list(self):\n\n def no_ca(ctx):\n ctx.set_client_ca_list([])\n return []\n\n self._check_client_ca_list(no_ca)",
"def ExclusiveAddressUse(self) -> bool:",
"def ExclusiveAddressUse(self) -> bool:",
"def ExclusiveAddressUse(self) -> bool:"
]
| [
"0.5056765",
"0.50430185",
"0.50411654",
"0.49688035",
"0.48858058",
"0.48572317",
"0.4816586",
"0.48022962",
"0.48006323",
"0.47516596",
"0.4751512",
"0.47317553",
"0.47303107",
"0.4713648",
"0.46938983",
"0.4681661",
"0.4647238",
"0.46310037",
"0.46113184",
"0.4600518",
"0.4593005",
"0.45836166",
"0.45722625",
"0.4559407",
"0.45464194",
"0.45448837",
"0.45398194",
"0.45334294",
"0.45334294",
"0.45334294"
]
| 0.65721285 | 0 |
Verify conversion of old resources.xml format to twext.who.xml format | def test_resourcesXML(self):
fileName = self.mktemp()
fp = FilePath(fileName)
fp.setContent(oldResourcesFormat)
upgradeResourcesXML(fp)
self.assertEquals(fp.getContent(), newResourcesFormat) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_augmentsXML(self):\n fileName = self.mktemp()\n fp = FilePath(fileName)\n fp.setContent(oldAugmentsFormat)\n upgradeAugmentsXML(fp)\n self.assertEquals(fp.getContent(), newAugmentsFormat)",
"def _check_deprecated_openerp_xml_node(self):\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath(\"/openerp\") \\\n if not isinstance(doc, string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True",
"def _check_xml_syntax_error(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True",
"def _check_xml_attribute_translatable(self):\n if (self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions != ['8.0']):\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(\n (\"%s:%d\" % (xml_file, record.sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True",
"def _check_redundant_modulename_xml(self):\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(\n xml_file, self.module):\n self.msg_args.append(\n (\"%s:%d\" % (xml_file_rel, lineno), xml_id))\n if self.msg_args:\n return False\n return True",
"def test_recover_from_bad_xml(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n jp2 = Jp2k(self._bad_xml_file)\n\n self.assertEqual(jp2.box[3].box_id, 'xml ')\n self.assertEqual(jp2.box[3].offset, 77)\n self.assertEqual(jp2.box[3].length, 64)\n self.assertEqual(ET.tostring(jp2.box[3].xml.getroot()),\n b'<test>this is a test</test>')",
"def test_invalidate_if_resource_dependency_change(self):\n workflow1 = self.get_workflow(\n \"\"\"file://result <- file://file1\n Some code\n\"\"\")\n workflow2 = self.get_workflow(\n \"\"\"file://result <- file://file2\n Some code\n\"\"\")\n invalid = workflow1.resources_not_created_the_same_way(workflow2)\n assert len(invalid) == 1, [(res.url, reason._reason) for (res, reason,) in invalid]\n (resource, invalidation_reason) = invalid[0]\n assert resource.url == \"file://result\"\n assert invalidation_reason == NOT_SAME_INPUTS",
"def _check_character_not_valid_in_resource_link(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = (doc.xpath('.//%s[@%s]' % (name, attr))\n if not isinstance(doc, string_types) else [])\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if (resource.startswith('/') and not\n re.search('^[.][a-zA-Z]+$', ext)):\n self.msg_args.append((\"%s:%s\" % (xml_file,\n node.sourceline)))\n if self.msg_args:\n return False\n return True",
"def check_wf(self,wf):\n pass",
"def test_migrate_interpretation_request_rd(self):\n old_instance = GenericFactoryAvro.get_factory_avro(\n self.old_model.InterpretationRequestRD, VERSION_300, fill_nullables=False\n ).create() # reports_3_0_0.InterpretationRequestRD\n self._validate(old_instance)\n migrated_instance = MigrateReports3To4().migrate_interpretation_request_rd(old_instance=old_instance)\n self._validate(migrated_instance)\n\n old_big_wigs = old_instance.bigWigs\n new_big_wigs = migrated_instance.bigWigs\n\n if old_big_wigs is not None:\n for old_big_wig, new_big_wig in zip(old_big_wigs, new_big_wigs):\n self.assertIsInstance(new_big_wig, self.new_model.File)\n self.assertEqual(new_big_wig.sampleId, old_big_wig.SampleId)\n self.assertEqual(new_big_wig.uriFile, old_big_wig.URIFile)\n self.assertEqual(new_big_wig.fileType, old_big_wig.fileType)\n self.assertEqual(new_big_wig.md5Sum, None)\n\n old_instance = GenericFactoryAvro.get_factory_avro(\n self.old_model.InterpretationRequestRD, VERSION_300, fill_nullables=True\n ).create() # reports_3_0_0.InterpretationRequestRD\n self._validate(old_instance)\n migrated_instance = MigrateReports3To4().migrate_interpretation_request_rd(old_instance=old_instance)\n\n for old_variant, new_variant in zip(old_instance.TieredVariants, migrated_instance.tieredVariants):\n for old_re, new_re in zip(old_variant.reportEvents, new_variant.reportEvents):\n self.assertEqual(old_re.genomicFeature.HGNC, new_re.genomicFeature.hgnc)\n\n self._validate(migrated_instance)",
"def _check_deprecated_data_xml_node(self):\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True",
"def resources():\n check_resources()",
"def _check_duplicate_xml_record_id(self):\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in \\\n self._get_duplicate_xml_record_id(xml_records).items():\n self.msg_args.append((\n \"%s:%d\" % (os.path.relpath(fobjs[0].base, self.module_path),\n fobjs[0].sourceline),\n name,\n ', '.join([os.path.relpath(fobj.base, self.module_path) +\n ':' + str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True",
"def test_does_validate_valid_xml_file(self):\n xml_file = join(\n getcwd(), 'testdata', 'newstest2019-defr-src-ts.de.FIXED.xml'\n )\n doc = valitest.ValidatableTestSet(xml_file)\n self.assertEqual(doc.setid, \"newstest2019\")\n self.assertEqual(doc.srclang, \"any\")",
"def test_invalidate_removed_resource(self):\n workflow1 = self.get_workflow(\n \"\"\"file://file2 <- file://file1\n\nfile://file3 <- file://file1\n\"\"\")\n workflow2 = self.get_workflow(\n \"\"\"file://file3 <- file://file1\n\"\"\")\n\n invalid = workflow1.resources_not_created_the_same_way(workflow2)\n assert len(invalid) == 1, [(res.url, reason._reason) for (res, reason,) in invalid]\n (resource, invalidation_reason) = invalid[0]\n assert resource.url == \"file://file2\"\n assert invalidation_reason == NO_LONGER_CREATED, invalidation_reason",
"def mergeResXml(copyFrom, copyTo):\n\n if not os.path.exists(copyTo):\n return False\n\n aryXml = ['strings.xml', 'styles.xml', 'colors.xml', 'dimens.xml', 'ids.xml', 'attrs.xml', 'integers.xml',\n 'arrays.xml', 'bools.xml', 'drawables.xml']\n basename = os.path.basename(copyFrom)\n\n if basename in aryXml:\n if utils_config.is_py_env_2():\n f = open(copyTo)\n else:\n f = open(copyTo, 'r', encoding='utf-8')\n targetContent = f.read()\n f.close()\n\n fromTree = ET.parse(copyFrom)\n fromRoot = fromTree.getroot()\n toTree = ET.parse(copyTo)\n toRoot = toTree.getroot()\n for node in list(fromRoot):\n val = node.get('name')\n if val != None and len(val) > 0:\n valMatched = '\"' + val + '\"'\n attrIndex = targetContent.find(valMatched)\n if -1 == attrIndex:\n toRoot.append(node)\n else:\n utils_log.warning(\"The node %s is already exists in %s\", val, basename)\n\n toTree.write(copyTo, 'UTF-8')\n return True\n return False",
"def checkMissionXML(self):\n # -- It is not included for simplifity --#\n self.mission_xml_as_expected = 'Unknown'",
"def test_equality(self):\n tools.eq_(self.old_manifest, load_manifest(StringIO(old_manifest)))",
"def test_do_not_need_alternate(self):\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-fail.xml'\n ))\n os.unlink(os.path.join(self.reports_dir,\n 'TEST-testutil.manual_test.LintTest-success.xml'\n ))\n actual = self._analyze_make_output()\n self.assertEqual(1, actual)\n self.assertIn('E999 lint error from txt-file.', self.errors[0])",
"def reference_segmenter_validation(self):\n for file in filter(lambda x: x.endswith('referenceSegmenter.tei.xml'), listdir(self.bs_directory)):\n print \"Processing\", file\n bs = BeautifulSoup(open(self.bs_directory + file), 'xml')\n self.__reference_segmenter_correction(bs)\n file = open(self.reference_segmenter_output + file, \"wb\")\n file.write(bs.prettify().encode('utf-8'))",
"async def test_update_with_xml_convert_bad_xml(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n headers={\"content-type\": \"text/xml\"},\n content=\"\",\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes\": [\"key\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == STATE_UNKNOWN\n assert \"REST xml result could not be parsed\" in caplog.text\n assert \"Empty reply\" in caplog.text",
"def validate_tariff(self):\n\t\treturn True",
"def test_XmlDump_compare_all(self):\n self._compare_variants(True)",
"def test_valid_xml(self):\r\n self.build_problem()\r\n self.assertTrue(True)",
"def test_incomplete_xml(self):\n self.__opener.contents = '<Report></Report>>'\n self.assertEqual(-1, self.__uft.failed_tests('url'))",
"def test_xml_files_with_missing_info():\n\n # Test when k is missing from constant type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/k_const.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when A is missing from Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/A_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when E is missing from Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/E_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when A is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/A_mod_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when b is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/b_mod_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when E is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/E_mod_arr.xml\"\n parser = XMLParser(xml_filename)",
"def _check_xml_deprecated_tree_attribute(self):\n checks = [\n {\n 'attr': 'colors',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@colors]',\n },\n {\n 'attr': 'fonts',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@fonts]',\n },\n {\n 'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},\n 'xpath': './/tree[@string]',\n },\n ]\n valid_versions = set(\n self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions)\n\n applicable_checks = [check for check in checks if (\n check['attr'] in self.config.deprecated_tree_attributes and\n bool(valid_versions - check['skip_versions']))]\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file),\n model='ir.ui.view'):\n\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append((\n '%s:%d' % (xml_file, record.sourceline),\n check['attr']))\n if self.msg_args:\n return False\n return True",
"def test_xml_safety_flag(self):\r\n\r\n self._setstaff_login()\r\n response = self._add_edx4edx()\r\n self.assertIn('GIT_IMPORT_WITH_XMLMODULESTORE', response.content)\r\n\r\n def_ms = modulestore()\r\n course = def_ms.courses.get('{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR)), None)\r\n self.assertIsNone(course)",
"def test_resource_only_content_type(self):\n\n def do_check(path):\n \"\"\"The contents of the .iml file should certain sourceFolder entries:\n\n <sourceFolder url=\".../testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_and_code\" isTestSource=\"false\" />\n <sourceFolder url=\".../testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only\" type=\"java-resource\" />\n <sourceFolder url=\".../testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly\" type=\"java-resource\" />\n ...\n \"\"\"\n found = set()\n iml_file = os.path.join(path, 'project.iml')\n self.assertTrue(os.path.exists(iml_file))\n dom = minidom.parse(iml_file)\n for sourceFolder in self._get_sourceFolders(dom):\n url = sourceFolder.getAttribute('url')\n is_test_source = sourceFolder.getAttribute('isTestSource')\n type_attr = sourceFolder.getAttribute('type')\n url = re.sub(r'^.*/testprojects/', 'testprojects/', url)\n found.add(url)\n if url == 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/code':\n self.assertEquals('', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/code':\n self.assertEquals('', type_attr)\n self.assertEquals('True', is_test_source)\n if url == 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n # TODO(Eric Ayers) tests/resources/.../idearesourcesonly : this directory has no\n # junit_tests depending on a target, so it is assumed to be plain resources.\n # Since this is under .../tests, humans know this is supposed to be a test only\n # resource. Right now we don't have a good way of communicating\n # that to the idea goal other than inferring from the presence of junit_tests in\n # source_root, which may not be a reliable indicator.\n if url == 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/resources_only':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly':\n self.assertEquals('java-resource', type_attr)\n self.assertEquals('False', is_test_source)\n if url == 'testprojects/tests/resources/org/pantsbuild/testproject/idearesourcesonly':\n self.assertEquals('java-test-resource', type_attr)\n self.assertEquals('True', is_test_source)\n\n self.assertEquals(set([\n 'testprojects/src/resources/org/pantsbuild/testproject/idearesourcesonly',\n 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/code',\n 'testprojects/tests/resources/org/pantsbuild/testproject/idearesourcesonly',\n 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/code',\n 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly/resources_only',\n 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly/resources_only'\n ]), found)\n\n self._idea_test([\n 'testprojects/src/java/org/pantsbuild/testproject/idearesourcesonly::',\n 'testprojects/tests/java/org/pantsbuild/testproject/idearesourcesonly::'\n ], check_func=do_check)",
"def test_invalid_xml_box(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n jp2k = Jp2k(self._bad_xml_file)\n\n self.assertEqual(jp2k.box[3].box_id, 'xml ')\n self.assertEqual(jp2k.box[3].offset, 77)\n self.assertEqual(jp2k.box[3].length, 28)\n self.assertIsNone(jp2k.box[3].xml)"
]
| [
"0.57672864",
"0.56299335",
"0.5602613",
"0.5601678",
"0.5579377",
"0.5514357",
"0.5493907",
"0.5473611",
"0.5369198",
"0.5347428",
"0.5339022",
"0.5327907",
"0.53258663",
"0.5279402",
"0.5250793",
"0.52303827",
"0.51986897",
"0.5194378",
"0.5167916",
"0.51619595",
"0.5104468",
"0.50756776",
"0.50723296",
"0.50557315",
"0.50512534",
"0.5049086",
"0.5047703",
"0.50315166",
"0.5029645",
"0.5025727"
]
| 0.7313695 | 0 |
Spams transactions with the same nonce, and ensures the server rejects all but one | async def test_transaction_nonce_lock(self):
no_tests = 20
txs = []
tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)
dtx = decode_transaction(tx)
txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))
for i in range(11, 10 + no_tests):
tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)
self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)
txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))
responses = await asyncio.gather(*(to_asyncio_future(self.fetch("/tx", method="POST", body={"tx": tx})) for tx in txs))
ok = 0
bad = 0
for resp in responses:
if resp.code == 200:
ok += 1
else:
bad += 1
self.assertEqual(ok, 1)
self.assertEqual(bad, no_tests - 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)",
"def test_nonce(mocker):\n transaction = Transaction(\n chain=0,\n nonce=14_294_967_296,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.nonce = 1_260_300\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.nonce = -1\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)",
"def test_issuance(logger, dbsession, web3, private_key_hex):\n\n # Creating transactions\n txs = deploy_token_contracts(logger, dbsession, \"testing\", web3,\n ethereum_abi_file=None,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n name=\"Moo Corp\",\n symbol=\"MOO\",\n amount=9999,\n transfer_restriction=\"unrestricted\")\n assert len(txs) == 5\n\n # Send transactions to emphmereal test chain\n txs = broadcast(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n )\n assert len(txs) == 5\n\n # Check they got mined\n txs = update_status(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n )\n assert len(txs) == 5\n for tx in txs: # type: PreparedTransaction\n assert tx.result_transaction_success\n\n token_address = txs[0].contract_address\n\n # Check that we can view the token status\n status = contract_status(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_abi_file=None,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n token_contract=token_address,\n )\n\n assert status[\"name\"] == \"Moo Corp\"\n assert status[\"totalSupply\"] == 9999 * 10**18\n assert status[\"totalSupply\"] == status[\"broadcastBalance\"]",
"def test_double_validate_skip_non_eurotoken(self):\n db = MockDatabase()\n\n G = db.owner\n\n A1 = TestBlock(\n block_type=BlockTypes.CHECKPOINT,\n transaction={'balance': 0},\n links=G\n )\n result, errors = A1.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(A1)\n\n A2 = TestBlock(\n block_type=\"NonEurotoken\",\n transaction={},\n previous=A1\n )\n db.add_block(A2)\n\n A3 = TestBlock(\n block_type=BlockTypes.CHECKPOINT,\n transaction={'balance': 0},\n previous=A2,\n links=G\n )\n result, errors = A3.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])",
"def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):\n guess = (str(transactions)+str(last_hash)+str(nonce)).encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:difficulty] == '0'*difficulty",
"def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1",
"def confirm_transactions():\n s = TimedSerializer(current_app.config['rpc_signature'])\n data = s.loads(request.data)\n\n # basic checking of input\n try:\n assert len(data['coin_txid']) == 64\n assert isinstance(data['pids'], list)\n assert isinstance(data['bids'], list)\n for id in data['pids']:\n assert isinstance(id, int)\n for id in data['bids']:\n assert isinstance(id, int)\n except AssertionError:\n current_app.logger.warn(\"Invalid data passed to confirm\", exc_info=True)\n abort(400)\n\n coin_trans = Transaction.create(data['coin_txid'])\n db.session.flush()\n Payout.query.filter(Payout.id.in_(data['pids'])).update(\n {Payout.transaction_id: coin_trans.txid}, synchronize_session=False)\n BonusPayout.query.filter(BonusPayout.id.in_(data['bids'])).update(\n {BonusPayout.transaction_id: coin_trans.txid}, synchronize_session=False)\n db.session.commit()\n return s.dumps(True)",
"def nonce():\n return random.randint(0, 4294967295)",
"def nonce():\n return random.randint(0, 4294967295)",
"def test_no_combine_with_fee(self):\n unspents_single = [Unspent(5000, 0, '', '', 0)]\n unspents_original = [Unspent(5000, 0, '', '', 0), Unspent(5000, 0, '', '', 0)]\n outputs_original = [(RETURN_ADDRESS, 1000, 'satoshi')]\n\n unspents, outputs = sanitize_tx_data(\n unspents_original,\n outputs_original,\n fee=1,\n leftover=RETURN_ADDRESS,\n combine=False,\n message=None,\n version='test',\n )\n\n unspents_single, outputs_single = sanitize_tx_data(\n unspents_single,\n outputs_original,\n fee=1,\n leftover=RETURN_ADDRESS,\n combine=False,\n message=None,\n version='test',\n )\n\n assert unspents == [Unspent(5000, 0, '', '', 0)]\n assert unspents_single == [Unspent(5000, 0, '', '', 0)]\n assert len(outputs) == 2\n assert len(outputs_single) == 2\n assert outputs[1][0] == RETURN_ADDRESS\n assert outputs_single[1][0] == RETURN_ADDRESS\n assert outputs[1][1] == outputs_single[1][1]",
"def test_blind_sig_expiration(self):\n signer_obj = ECCBlind(year=2020, month=1)\n point_r = signer_obj.signer_init()\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n msg = os.urandom(64)\n msg_blinded = requester_obj.create_signing_request(point_r, msg)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n verifier_obj = ECCBlind(pubkey=signer_obj.pubkey())\n self.assertFalse(verifier_obj.verify(msg, signature))",
"def test_validate_chain_with_tempered_block_nonce(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n last_block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(last_block)\n\n chain = blockchain.full_chain\n\n # Hack a block\n chain.append(Block(1, [], 1, last_block.hash))\n\n self.assertFalse(blockchain.validate_chain(blockchain.full_chain))",
"def nonceVerification(nonce, decryptedNonce):\r\n if (nonce == decryptedNonce):\r\n status = \"150 OK\"\r\n else:\r\n status = \"400 Error\"\r\n return status",
"def test_transfer_blocked(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=False)\n with pytest.raises(ValueError):\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)",
"def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}",
"def test_transfer_bypass_token(chain, token, carrier, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=True)\n with pytest.raises(ValueError):\n # This call must always come from token contract\n carrier.transact().transfer(shareholder1, boogieman, True)",
"def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False",
"def test_modify_transaction_after_signing(mocker):\n transaction_original = Transaction(\n chain=0,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n )\n\n transaction = transaction_original.sign(PRIVATE_KEY_1)\n transaction.value = 10_000_000\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_INVALID_SIGNATURE\n ):\n transaction.validate(raise_exception=True)",
"def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)",
"def test_sign_offline(cluster):\n # 1. first create two hd new wallet\n seed = \"dune car envelope chuckle elbow slight proud fury remove candy uphold \\\n puzzle call select sibling sport gadget please want vault glance verb damage gown\"\n wallet_1 = Wallet(seed)\n address_1 = wallet_1.address\n wallet_2 = Wallet.new()\n address_2 = wallet_2.address\n\n sender_addr = cluster.address(\"signer1\")\n\n sender_balance = cluster.balance(sender_addr)\n assert sender_balance > 100 * 10 ** 8\n balance_1 = cluster.balance(wallet_1.address)\n assert balance_1 == 0\n balance_2 = cluster.balance(wallet_2.address)\n assert balance_2 == 0\n\n # 2. transfer some coin to wallet_1\n cluster.transfer(sender_addr, address_1, \"100cro\")\n wait_for_new_blocks(cluster, 2)\n\n assert cluster.balance(sender_addr) == sender_balance - 100 * 10 ** 8\n assert cluster.balance(address_1) == 100 * 10 ** 8\n\n # 3. get the send's account info\n port = ports.api_port(cluster.base_port(0))\n api = ApiUtil(port)\n\n amount = 1 * 10 ** 8\n # make transaction without/with fee\n for fee in [0, 600000]:\n sender_account_info = api.account_info(address_1)\n balance_1_before = api.balance(address_1)\n balance_2_before = api.balance(address_2)\n tx = Transaction(\n wallet=wallet_1,\n account_num=sender_account_info[\"account_num\"],\n sequence=sender_account_info[\"sequence\"],\n chain_id=cluster.chain_id,\n fee=fee,\n )\n tx.add_transfer(to_address=address_2, amount=amount, base_denom=\"basecro\")\n signed_tx = tx.get_pushable()\n assert isinstance(signed_tx, dict)\n api.broadcast_tx(signed_tx)\n wait_for_new_blocks(cluster, 3)\n balance_1_after = api.balance(address_1)\n balance_2_after = api.balance(address_2)\n assert balance_2_after == balance_2_before + amount\n assert balance_1_after == balance_1_before - amount - fee",
"def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)",
"def test_participate_bad_signature(chain, crowdsale, customer, customer_id, token):\n\n address_bytes = get_address_as_bytes(customer)\n sign_data = sign(address_bytes, private_key)\n\n time_travel(chain, crowdsale.call().startsAt() + 1)\n wei_value = to_wei(1, \"ether\")\n assert crowdsale.call().getState() == CrowdsaleState.Funding\n\n sign_data[\"s_bytes\"] = b'ABC' # Corrupt signature data\n\n with pytest.raises(TransactionFailed):\n crowdsale.transact({\"from\": customer, \"value\": wei_value}).buyWithSignedAddress(customer_id, sign_data[\"v\"], sign_data[\"r_bytes\"], sign_data[\"s_bytes\"])",
"def testSendRequestWithoutSignatureFails(pool):\n\n async def go(ctx):\n client1, wallet = genTestClient(ctx.nodeset, tmpdir=ctx.tmpdir)\n\n # remove the client's ability to sign\n assert wallet.defaultId\n\n ctx.looper.add(client1)\n await client1.ensureConnectedToNodes()\n\n request = wallet.signOp(op=randomOperation())\n request.signature = None\n request = client1.submitReqs(request)[0]\n with pytest.raises(AssertionError):\n for node in ctx.nodeset:\n await eventually(\n checkLastClientReqForNode, node, request,\n retryWait=1, timeout=10)\n\n for n in ctx.nodeset:\n params = n.spylog.getLastParams(Node.handleInvalidClientMsg)\n ex = params['ex']\n _, frm = params['wrappedMsg']\n assert isinstance(ex, EmptySignature)\n assert frm == client1.stackName\n\n params = n.spylog.getLastParams(Node.discard)\n reason = params[\"reason\"]\n (msg, frm) = params[\"msg\"]\n assert msg == request.__dict__\n assert frm == client1.stackName\n assert \"EmptySignature\" in reason\n\n pool.run(go)",
"def nonceVerification(nonce, decryptedNonce):\n #Enter code to compare the nonce and the decryptedNonce. This method\n # should return a string of \"200 OK\" if the parameters match otherwise\n # it should return \"400 Error Detected\"\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"",
"def prepare_funding(self):\n entity_miner = self.entities[0]\n\n entity_miner.send_bitcoins(entity_miner.address)\n entity_miner.purchase_mastercoins(500.0)\n\n self.generate_block()\n self.check_balance(entity_miner.address, MSC, '50000.00000000', '0.00000000')\n self.check_balance(entity_miner.address, TMSC, '50000.00000000', '0.00000000')",
"def test_unlock_sig(mocker):\n transaction = Transaction(\n chain=1,\n nonce=10,\n fee=50,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate_fields() == True\n assert transaction.validate_fields(raise_exception=True) == True\n\n transaction.unlock_sig = binascii.unhexlify(\"0\" * 258)\n assert transaction.validate_fields() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_SIGNATURE\n ):\n transaction.validate_fields(raise_exception=True)\n\n transaction.unlock_sig = binascii.unhexlify(\"1\" * 256)\n assert transaction.validate_fields() == True\n assert transaction.validate_fields(raise_exception=True) == True",
"def _fund(src_acc, accounts, amount, shard_index):\n if not accounts:\n return []\n hashes = []\n for account in accounts:\n from_address = cli.get_address(src_acc)\n to_address = cli.get_address(account)\n passphrase = get_passphrase(src_acc)\n h = send_transaction(from_address, to_address, shard_index, shard_index, amount,\n passphrase=passphrase, retry=True, wait=True)\n if h is None:\n raise RuntimeError(f\"Failed to send tx from {from_address} to {to_address}\")\n hashes.append(h)\n return hashes",
"def nonceVerification(nonce, decryptedNonce):\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"",
"def useNonce(self, server_url, timestamp, salt):\n\n if is_nonce_old(timestamp):\n return False\n\n try:\n mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,\n timestamp=timestamp)\n except me.DoesNotExist:\n mist_nonces = []\n\n if len(mist_nonces) == 0:\n print(\"Timestamp = %s\" % timestamp)\n MistNonce(\n server_url=server_url, salt=salt, timestamp=timestamp\n ).save()\n return True\n\n return False",
"def test_signature_and_dlrne_fails_on_wrong_secret():\n mG = BilinearGroupPair()\n keypair = BBSPlusKeypair.generate(mG, 9)\n messages = [Bn(30), Bn(31), Bn(32)]\n pk, sk = keypair.pk, keypair.sk\n generators, h0 = keypair.generators, keypair.h0\n\n creator = BBSPlusSignatureCreator(pk)\n lhs = creator.commit(messages)\n presignature = sk.sign(lhs.com_message)\n signature = creator.obtain_signature(presignature)\n e, s, m1, m2, m3 = (Secret() for _ in range(5))\n secret_dict = {\n e: signature.e,\n s: signature.s,\n m1: messages[0],\n m2: messages[1],\n m3: messages[2],\n }\n\n sigproof = BBSPlusSignatureStmt([e, s, m1, m2, m3], pk, signature)\n\n g1 = mG.G1.generator()\n pg1 = signature.s * g1\n pg2, g2 = mG.G1.order().random() * g1, mG.G1.order().random() * g1\n dneq = DLNotEqual((pg1, g1), (pg2, g2), s, bind=True)\n\n secrets = [Secret() for _ in range(5)]\n sigproof1 = BBSPlusSignatureStmt(secrets, pk, signature)\n dneq1 = DLNotEqual((pg1, g1), (pg2, g2), secrets[1], bind=True)\n\n andp = sigproof & dneq\n andp1 = sigproof1 & dneq1\n prov = andp.get_prover(secret_dict)\n\n prov.subs[1].secret_values[s] = signature.s + 1\n ver = andp1.get_verifier()\n\n ver.process_precommitment(prov.precommit())\n\n commitment = prov.commit()\n\n challenge = ver.send_challenge(commitment)\n responses = prov.compute_response(challenge)\n with pytest.raises(ValidationError):\n ver.verify(responses)"
]
| [
"0.69079286",
"0.58825755",
"0.5746995",
"0.5733096",
"0.5711697",
"0.567918",
"0.56385994",
"0.5612486",
"0.5612486",
"0.55686194",
"0.5554134",
"0.55391735",
"0.54671746",
"0.5456013",
"0.54439116",
"0.5411669",
"0.537741",
"0.5359163",
"0.53529143",
"0.5338359",
"0.5334859",
"0.531487",
"0.53114974",
"0.5282851",
"0.52726394",
"0.5261222",
"0.5251959",
"0.5250221",
"0.5247287",
"0.5245956"
]
| 0.7153444 | 0 |
Spams transactions with the same nonce, and ensures the server rejects all but one | async def test_prevent_out_of_order_txs(self):
tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)
dtx1 = decode_transaction(tx1)
stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)
tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)
stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)
resp = await self.fetch("/tx", method="POST", body={"tx": stx2})
self.assertEqual(resp.code, 400, resp.body)
resp = await self.fetch("/tx", method="POST", body={"tx": stx1})
self.assertEqual(resp.code, 200, resp.body)
resp = await self.fetch("/tx", method="POST", body={"tx": stx2})
self.assertEqual(resp.code, 200, resp.body) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_transaction_nonce_lock(self):\n\n no_tests = 20\n\n txs = []\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx = decode_transaction(tx)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n for i in range(11, 10 + no_tests):\n tx = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** i)\n self.assertEqual(decode_transaction(tx).nonce, dtx.nonce)\n txs.append(sign_transaction(tx, FAUCET_PRIVATE_KEY))\n\n responses = await asyncio.gather(*(to_asyncio_future(self.fetch(\"/tx\", method=\"POST\", body={\"tx\": tx})) for tx in txs))\n\n ok = 0\n bad = 0\n for resp in responses:\n if resp.code == 200:\n ok += 1\n else:\n bad += 1\n self.assertEqual(ok, 1)\n self.assertEqual(bad, no_tests - 1)",
"def test_nonce(mocker):\n transaction = Transaction(\n chain=0,\n nonce=14_294_967_296,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)\n\n transaction.nonce = 1_260_300\n assert transaction.validate() == True\n assert transaction.validate(raise_exception=True) == True\n\n transaction.nonce = -1\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_NONCE\n ):\n transaction.validate(raise_exception=True)",
"def test_issuance(logger, dbsession, web3, private_key_hex):\n\n # Creating transactions\n txs = deploy_token_contracts(logger, dbsession, \"testing\", web3,\n ethereum_abi_file=None,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n name=\"Moo Corp\",\n symbol=\"MOO\",\n amount=9999,\n transfer_restriction=\"unrestricted\")\n assert len(txs) == 5\n\n # Send transactions to emphmereal test chain\n txs = broadcast(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n )\n assert len(txs) == 5\n\n # Check they got mined\n txs = update_status(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n )\n assert len(txs) == 5\n for tx in txs: # type: PreparedTransaction\n assert tx.result_transaction_success\n\n token_address = txs[0].contract_address\n\n # Check that we can view the token status\n status = contract_status(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_abi_file=None,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n token_contract=token_address,\n )\n\n assert status[\"name\"] == \"Moo Corp\"\n assert status[\"totalSupply\"] == 9999 * 10**18\n assert status[\"totalSupply\"] == status[\"broadcastBalance\"]",
"def test_double_validate_skip_non_eurotoken(self):\n db = MockDatabase()\n\n G = db.owner\n\n A1 = TestBlock(\n block_type=BlockTypes.CHECKPOINT,\n transaction={'balance': 0},\n links=G\n )\n result, errors = A1.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(A1)\n\n A2 = TestBlock(\n block_type=\"NonEurotoken\",\n transaction={},\n previous=A1\n )\n db.add_block(A2)\n\n A3 = TestBlock(\n block_type=BlockTypes.CHECKPOINT,\n transaction={'balance': 0},\n previous=A2,\n links=G\n )\n result, errors = A3.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])",
"def valid_proof(transactions, last_hash, nonce, difficulty=MINING_DIFFICULTY):\n guess = (str(transactions)+str(last_hash)+str(nonce)).encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:difficulty] == '0'*difficulty",
"def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1",
"def confirm_transactions():\n s = TimedSerializer(current_app.config['rpc_signature'])\n data = s.loads(request.data)\n\n # basic checking of input\n try:\n assert len(data['coin_txid']) == 64\n assert isinstance(data['pids'], list)\n assert isinstance(data['bids'], list)\n for id in data['pids']:\n assert isinstance(id, int)\n for id in data['bids']:\n assert isinstance(id, int)\n except AssertionError:\n current_app.logger.warn(\"Invalid data passed to confirm\", exc_info=True)\n abort(400)\n\n coin_trans = Transaction.create(data['coin_txid'])\n db.session.flush()\n Payout.query.filter(Payout.id.in_(data['pids'])).update(\n {Payout.transaction_id: coin_trans.txid}, synchronize_session=False)\n BonusPayout.query.filter(BonusPayout.id.in_(data['bids'])).update(\n {BonusPayout.transaction_id: coin_trans.txid}, synchronize_session=False)\n db.session.commit()\n return s.dumps(True)",
"def nonce():\n return random.randint(0, 4294967295)",
"def nonce():\n return random.randint(0, 4294967295)",
"def test_no_combine_with_fee(self):\n unspents_single = [Unspent(5000, 0, '', '', 0)]\n unspents_original = [Unspent(5000, 0, '', '', 0), Unspent(5000, 0, '', '', 0)]\n outputs_original = [(RETURN_ADDRESS, 1000, 'satoshi')]\n\n unspents, outputs = sanitize_tx_data(\n unspents_original,\n outputs_original,\n fee=1,\n leftover=RETURN_ADDRESS,\n combine=False,\n message=None,\n version='test',\n )\n\n unspents_single, outputs_single = sanitize_tx_data(\n unspents_single,\n outputs_original,\n fee=1,\n leftover=RETURN_ADDRESS,\n combine=False,\n message=None,\n version='test',\n )\n\n assert unspents == [Unspent(5000, 0, '', '', 0)]\n assert unspents_single == [Unspent(5000, 0, '', '', 0)]\n assert len(outputs) == 2\n assert len(outputs_single) == 2\n assert outputs[1][0] == RETURN_ADDRESS\n assert outputs_single[1][0] == RETURN_ADDRESS\n assert outputs[1][1] == outputs_single[1][1]",
"def test_blind_sig_expiration(self):\n signer_obj = ECCBlind(year=2020, month=1)\n point_r = signer_obj.signer_init()\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n msg = os.urandom(64)\n msg_blinded = requester_obj.create_signing_request(point_r, msg)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n verifier_obj = ECCBlind(pubkey=signer_obj.pubkey())\n self.assertFalse(verifier_obj.verify(msg, signature))",
"def test_validate_chain_with_tempered_block_nonce(self):\n miner_address = 'miner_address'\n\n blockchain = Blockchain()\n last_block = blockchain.mine(miner_address)\n\n # First we look that a new block could be mined\n self.assertIsNotNone(last_block)\n\n chain = blockchain.full_chain\n\n # Hack a block\n chain.append(Block(1, [], 1, last_block.hash))\n\n self.assertFalse(blockchain.validate_chain(blockchain.full_chain))",
"def nonceVerification(nonce, decryptedNonce):\r\n if (nonce == decryptedNonce):\r\n status = \"150 OK\"\r\n else:\r\n status = \"400 Error\"\r\n return status",
"def test_transfer_blocked(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=False)\n with pytest.raises(ValueError):\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)",
"def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}",
"def test_transfer_bypass_token(chain, token, carrier, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=True)\n with pytest.raises(ValueError):\n # This call must always come from token contract\n carrier.transact().transfer(shareholder1, boogieman, True)",
"def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False",
"def test_modify_transaction_after_signing(mocker):\n transaction_original = Transaction(\n chain=0,\n nonce=4_294_967_295,\n fee=57000,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n )\n\n transaction = transaction_original.sign(PRIVATE_KEY_1)\n transaction.value = 10_000_000\n\n assert transaction.validate() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_INVALID_SIGNATURE\n ):\n transaction.validate(raise_exception=True)",
"def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)",
"def test_sign_offline(cluster):\n # 1. first create two hd new wallet\n seed = \"dune car envelope chuckle elbow slight proud fury remove candy uphold \\\n puzzle call select sibling sport gadget please want vault glance verb damage gown\"\n wallet_1 = Wallet(seed)\n address_1 = wallet_1.address\n wallet_2 = Wallet.new()\n address_2 = wallet_2.address\n\n sender_addr = cluster.address(\"signer1\")\n\n sender_balance = cluster.balance(sender_addr)\n assert sender_balance > 100 * 10 ** 8\n balance_1 = cluster.balance(wallet_1.address)\n assert balance_1 == 0\n balance_2 = cluster.balance(wallet_2.address)\n assert balance_2 == 0\n\n # 2. transfer some coin to wallet_1\n cluster.transfer(sender_addr, address_1, \"100cro\")\n wait_for_new_blocks(cluster, 2)\n\n assert cluster.balance(sender_addr) == sender_balance - 100 * 10 ** 8\n assert cluster.balance(address_1) == 100 * 10 ** 8\n\n # 3. get the send's account info\n port = ports.api_port(cluster.base_port(0))\n api = ApiUtil(port)\n\n amount = 1 * 10 ** 8\n # make transaction without/with fee\n for fee in [0, 600000]:\n sender_account_info = api.account_info(address_1)\n balance_1_before = api.balance(address_1)\n balance_2_before = api.balance(address_2)\n tx = Transaction(\n wallet=wallet_1,\n account_num=sender_account_info[\"account_num\"],\n sequence=sender_account_info[\"sequence\"],\n chain_id=cluster.chain_id,\n fee=fee,\n )\n tx.add_transfer(to_address=address_2, amount=amount, base_denom=\"basecro\")\n signed_tx = tx.get_pushable()\n assert isinstance(signed_tx, dict)\n api.broadcast_tx(signed_tx)\n wait_for_new_blocks(cluster, 3)\n balance_1_after = api.balance(address_1)\n balance_2_after = api.balance(address_2)\n assert balance_2_after == balance_2_before + amount\n assert balance_1_after == balance_1_before - amount - fee",
"def test_non_contractor_acks_receipt(self):\n res = self.client.post(self.url)\n self.assertEqual(res.status_code, 403)",
"def test_participate_bad_signature(chain, crowdsale, customer, customer_id, token):\n\n address_bytes = get_address_as_bytes(customer)\n sign_data = sign(address_bytes, private_key)\n\n time_travel(chain, crowdsale.call().startsAt() + 1)\n wei_value = to_wei(1, \"ether\")\n assert crowdsale.call().getState() == CrowdsaleState.Funding\n\n sign_data[\"s_bytes\"] = b'ABC' # Corrupt signature data\n\n with pytest.raises(TransactionFailed):\n crowdsale.transact({\"from\": customer, \"value\": wei_value}).buyWithSignedAddress(customer_id, sign_data[\"v\"], sign_data[\"r_bytes\"], sign_data[\"s_bytes\"])",
"def testSendRequestWithoutSignatureFails(pool):\n\n async def go(ctx):\n client1, wallet = genTestClient(ctx.nodeset, tmpdir=ctx.tmpdir)\n\n # remove the client's ability to sign\n assert wallet.defaultId\n\n ctx.looper.add(client1)\n await client1.ensureConnectedToNodes()\n\n request = wallet.signOp(op=randomOperation())\n request.signature = None\n request = client1.submitReqs(request)[0]\n with pytest.raises(AssertionError):\n for node in ctx.nodeset:\n await eventually(\n checkLastClientReqForNode, node, request,\n retryWait=1, timeout=10)\n\n for n in ctx.nodeset:\n params = n.spylog.getLastParams(Node.handleInvalidClientMsg)\n ex = params['ex']\n _, frm = params['wrappedMsg']\n assert isinstance(ex, EmptySignature)\n assert frm == client1.stackName\n\n params = n.spylog.getLastParams(Node.discard)\n reason = params[\"reason\"]\n (msg, frm) = params[\"msg\"]\n assert msg == request.__dict__\n assert frm == client1.stackName\n assert \"EmptySignature\" in reason\n\n pool.run(go)",
"def nonceVerification(nonce, decryptedNonce):\n #Enter code to compare the nonce and the decryptedNonce. This method\n # should return a string of \"200 OK\" if the parameters match otherwise\n # it should return \"400 Error Detected\"\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"",
"def prepare_funding(self):\n entity_miner = self.entities[0]\n\n entity_miner.send_bitcoins(entity_miner.address)\n entity_miner.purchase_mastercoins(500.0)\n\n self.generate_block()\n self.check_balance(entity_miner.address, MSC, '50000.00000000', '0.00000000')\n self.check_balance(entity_miner.address, TMSC, '50000.00000000', '0.00000000')",
"def test_unlock_sig(mocker):\n transaction = Transaction(\n chain=1,\n nonce=10,\n fee=50,\n value=5_000_000,\n to_address=\"1H7NtUENrEbwSVm52fHePzBnu4W3bCqimP\",\n unlock_sig=Config.COINBASE_UNLOCK_SIGNATURE,\n )\n\n assert transaction.validate_fields() == True\n assert transaction.validate_fields(raise_exception=True) == True\n\n transaction.unlock_sig = binascii.unhexlify(\"0\" * 258)\n assert transaction.validate_fields() == False\n with pytest.raises(\n TransactionNotValid, match=errors.TRANSACTION_FIELD_SIGNATURE\n ):\n transaction.validate_fields(raise_exception=True)\n\n transaction.unlock_sig = binascii.unhexlify(\"1\" * 256)\n assert transaction.validate_fields() == True\n assert transaction.validate_fields(raise_exception=True) == True",
"def _fund(src_acc, accounts, amount, shard_index):\n if not accounts:\n return []\n hashes = []\n for account in accounts:\n from_address = cli.get_address(src_acc)\n to_address = cli.get_address(account)\n passphrase = get_passphrase(src_acc)\n h = send_transaction(from_address, to_address, shard_index, shard_index, amount,\n passphrase=passphrase, retry=True, wait=True)\n if h is None:\n raise RuntimeError(f\"Failed to send tx from {from_address} to {to_address}\")\n hashes.append(h)\n return hashes",
"def nonceVerification(nonce, decryptedNonce):\n if nonce == decryptedNonce:\n return \"200 OK\"\n else:\n return \"400 Error Detected\"",
"def useNonce(self, server_url, timestamp, salt):\n\n if is_nonce_old(timestamp):\n return False\n\n try:\n mist_nonces = MistNonce.objects(server_url=server_url, salt=salt,\n timestamp=timestamp)\n except me.DoesNotExist:\n mist_nonces = []\n\n if len(mist_nonces) == 0:\n print(\"Timestamp = %s\" % timestamp)\n MistNonce(\n server_url=server_url, salt=salt, timestamp=timestamp\n ).save()\n return True\n\n return False",
"def test_signature_and_dlrne_fails_on_wrong_secret():\n mG = BilinearGroupPair()\n keypair = BBSPlusKeypair.generate(mG, 9)\n messages = [Bn(30), Bn(31), Bn(32)]\n pk, sk = keypair.pk, keypair.sk\n generators, h0 = keypair.generators, keypair.h0\n\n creator = BBSPlusSignatureCreator(pk)\n lhs = creator.commit(messages)\n presignature = sk.sign(lhs.com_message)\n signature = creator.obtain_signature(presignature)\n e, s, m1, m2, m3 = (Secret() for _ in range(5))\n secret_dict = {\n e: signature.e,\n s: signature.s,\n m1: messages[0],\n m2: messages[1],\n m3: messages[2],\n }\n\n sigproof = BBSPlusSignatureStmt([e, s, m1, m2, m3], pk, signature)\n\n g1 = mG.G1.generator()\n pg1 = signature.s * g1\n pg2, g2 = mG.G1.order().random() * g1, mG.G1.order().random() * g1\n dneq = DLNotEqual((pg1, g1), (pg2, g2), s, bind=True)\n\n secrets = [Secret() for _ in range(5)]\n sigproof1 = BBSPlusSignatureStmt(secrets, pk, signature)\n dneq1 = DLNotEqual((pg1, g1), (pg2, g2), secrets[1], bind=True)\n\n andp = sigproof & dneq\n andp1 = sigproof1 & dneq1\n prov = andp.get_prover(secret_dict)\n\n prov.subs[1].secret_values[s] = signature.s + 1\n ver = andp1.get_verifier()\n\n ver.process_precommitment(prov.precommit())\n\n commitment = prov.commit()\n\n challenge = ver.send_challenge(commitment)\n responses = prov.compute_response(challenge)\n with pytest.raises(ValidationError):\n ver.verify(responses)"
]
| [
"0.7153444",
"0.58825755",
"0.5746995",
"0.5733096",
"0.5711697",
"0.567918",
"0.56385994",
"0.5612486",
"0.5612486",
"0.55686194",
"0.5554134",
"0.55391735",
"0.54671746",
"0.5456013",
"0.54439116",
"0.5411669",
"0.537741",
"0.5359163",
"0.53529143",
"0.5338359",
"0.5334859",
"0.531487",
"0.53114974",
"0.5282851",
"0.52726394",
"0.5261222",
"0.5251959",
"0.5250221",
"0.5247287",
"0.5245956"
]
| 0.69079286 | 1 |
Return a controller instance that is based on the equipment role. | def get_controller(equipment, accessmethod, logfile=None):
path = _CONTROLLERMAP[accessmethod]
constructor = module.get_object(path)
return constructor(equipment, logfile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def controller( self ):\n\t\ttry:\n\t\t\treturn self._controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"controller: %s\" % str(e) )",
"def _get_controller(self):\n return self.__controller",
"def controller(self) -> Optional['outputs.CSIPowerStoreSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def controller(self) -> Optional['outputs.CSIUnitySpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def getController(self):\n return self.__controller",
"def get_registered_controller(self, model):\n return self._registry[model]",
"def controller(self) -> Optional['outputs.CSIPowerMaxSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def controller(self):\n return self._controller",
"def create_controller(self, typ):\n return self.controller_objects[typ]()",
"def get_controller2(self):\n return self.__controller2",
"def get_player_controller() -> unrealsdk.UObject:\n return unrealsdk.GetEngine().GamePlayers[0].Actor",
"def robot(self):\n return equipment_module.Equipment(self._get_attr('robot_id'))",
"def get_controller(cls):\n if not cls.hnd:\n raise Exception('A handler is to be set for getting contoller.')\n if not cls.controller:\n cls.controller = cls.config.controller_class(cls.hnd)\n cls.session = cls.controller.session\n return cls.controller",
"def controller(self) -> Optional['outputs.CSIVXFlexOSSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def robot(self):\n return equipment_module.Equipment(\n self._get_attr('extraction_robot_id'))",
"def instance():\n\n if Controller._instance == None:\n Controller._instance = Controller()\n return Controller._instance",
"def getController(self,deviceID):\n if deviceID in self.controllers:\n return self.controllers[deviceID]\n else:\n newCtrl = MotorController(self,deviceID)\n self.controllers[deviceID] = newCtrl\n return newCtrl",
"def get_controller1(self):\n return self.__controller1",
"def controller(self) -> Optional['outputs.CSIIsilonSpecDriverController']:\n return pulumi.get(self, \"controller\")",
"def _get_equipment(self):\r\n eq = self._pvsr.getEquipmentByName(self._meas[\"equipment\"])\r\n if eq is None:\r\n site = self._pvsr.getSiteByName(self._default_site)\r\n if site is None:\r\n logging.info(\"Creating new default site {0}\".format(self._default_site))\r\n site = self._pvsr.create_pvsr_object(\"Site\")\r\n site.ParentId = 1\r\n site.Name = self._default_site\r\n site=self._pvsr.addSite(site)\r\n else:\r\n logging.debug(\"Default site ID is {0}\".format(site.Id))\r\n \r\n logging.info(\"Creating new equipment: {0}\".format(self._meas[\"equipment\"]))\r\n if self._meas[\"collector_type\"] == 'J':\r\n eq = self._pvsr.create_pvsr_object(\"JagaEquipment\")\r\n eq.ASCII_0000_EQ_COLL_KEY = self._meas[\"equipment\"] + \"key\"\r\n elif self._meas[\"collector_type\"] == 'Y':\r\n eq = self._pvsr.create_pvsr_object(\"SynthTransEquipment\")\r\n else:\r\n raise ValueError(\"The equipment does not exist in PVSR\") \r\n eq.Name = self._meas[\"equipment\"]\r\n eq.ParentId = site.Id\r\n eq.CollectorType = self._meas[\"collector_type\"]\r\n eq.IntervalInSec = 300\r\n eq.RetainRawData = 365\r\n eq.CollectData = \"Yes\"\r\n \r\n eq = self._pvsr.addEquipment(eq)\r\n logging.info(\"Added equipment {0}, id: {1}\".format(self._meas[\"equipment\"],eq.Id))\r\n else:\r\n logging.debug(\"Found equipment: {0}, id: {1}\".format(self._meas[\"equipment\"],eq.Id))\r\n return eq",
"def _get_role(self):\n return self.__role",
"def get_equipment(self, name):\n db = self.session\n try:\n eqrow = db.query(models.Equipment).filter(models.Equipment.name.contains(name)).one()\n except config.NoResultFound as err:\n raise config.ConfigError(\"Bad equipment name %r: %s\" % (name, err))\n return EquipmentRuntime(eqrow, \"unspecified\", self.get_logfile(), db)",
"def get_controller(self) -> PIDController:\n return deepcopy(self._controller)",
"def controller(self): # type: () -> ControllerHostConfig\n return self.host_settings.controller",
"def create_controller() -> Controller:\n _controller = Controller()\n return _controller",
"def controller(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"controller\")",
"def get_claim_controller(self, queue, project=None):\n target = self.lookup(queue, project)\n return target and target.claim_controller",
"def get_role(self):\n return self.role",
"def get_controller_func(controller):\n\n if controller in CONTROLLERS:\n return CONTROLLERS[controller]\n\n return None",
"def acquire(self) -> ICommunicationController:\n register = self.__constructor.build_communication()\n self.__constructor.close()\n com_proc = self.__proc_cls(register)\n com_ctrl = cc(com_proc)\n return com_ctrl"
]
| [
"0.6269231",
"0.6233761",
"0.6187338",
"0.6164868",
"0.6092095",
"0.60748947",
"0.6031051",
"0.5952912",
"0.5898791",
"0.58094746",
"0.58000463",
"0.57449204",
"0.5729063",
"0.5727584",
"0.5714278",
"0.5706469",
"0.569184",
"0.566643",
"0.5543107",
"0.55369985",
"0.5530174",
"0.54606265",
"0.54599786",
"0.54293483",
"0.5374419",
"0.53214025",
"0.52912396",
"0.5258835",
"0.5245722",
"0.52184993"
]
| 0.71080726 | 0 |
Converts the given data frame into a list of lists. When the `row` parameter is given with a value >= 0, only that row is extracted as a list from the data frame. | def as_list(df: pandas.DataFrame, row=-1) -> list:
if df is None:
return []
if row >= 0:
rec = []
for col in range(0, 13):
rec.append(df.iat[row, col])
return rec
recs = []
for row in range(df.shape[0]):
recs.append(as_list(df, row=row))
return recs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dataframe_to_list(df: pandas.DataFrame) -> list:\n return json.loads(df.to_json(orient=\"records\"))",
"def column_to_list(data, index):\n return [line[index] for line in data]",
"def rows_from_data (data):\n return data.tolist() # use numpy.ndarray conversion function",
"def geo_dataframe_to_list(data_frame, polygon=False):\n to_return = []\n for line in data_frame.geometry:\n lines = []\n if polygon:\n for values in line.exterior.coords:\n lines.append(values)\n else:\n for values in line.coords:\n lines.append(values)\n to_return.append(lines)\n return to_return",
"def getRowVals(self, row=None, include_nones=None): # Returns: row values\n if row is None or row < 1 or row > self.nRow:\n raise SelectError(f\"bad row number :{row}\") \n \n vals = []\n for ci in range(self.nCol):\n col = ci + 1\n val = self.getCellVal(row=row, col=col)\n if include_nones or not self.isEmpty(val):\n vals.append(val)\n return vals",
"def convert_df_to_list(dividend_df):\n dividend_list = []\n date_list = dividend_df.head(len(dividend_df)).index.strftime('%Y-%m-%d').tolist()\n dividend_val_list = dividend_df.tolist()\n for i in range (len(dividend_val_list)):\n dividend_list.append([date_list[i], dividend_val_list[i]]) \n \n return(dividend_list)",
"def get_values(df):\n return df.columns.values.tolist()",
"def toList(self, rowmajor=0):\r\n if rowmajor:\r\n return copy.copy(self.mlist)\r\n else:\r\n return self.transpose().mlist",
"def getRow(self, row):\n returnvalue = list()\n for item in self._value[row]:\n returnvalue.append(item)\n return returnvalue",
"def data_to_list(data_index):\n results = []\n for row in data:\n results.append(int(row[data_index]))\n return results",
"def get_coin_price_list(df: pd.DataFrame) -> list:\n return df['rates'].to_list()",
"def _get_xls_row_vals(self, row):\n return [v.value for v in row]",
"def as_list(self):\n data = []\n for row in self._matrix_data:\n for column in row:\n data.append(column)\n return data",
"def convert_to_list(graph):\n result = []\n for i in range(graph.size):\n row = []\n for j in range(graph.size):\n if graph.matrix[i][j]:\n row.append(j)\n result.append(row)\n return result",
"def column_to_list(column):\n column = column.tolist()\n column_string = \"\"\n for i in range(len(column)):\n current_row = column[i]\n try:\n column_string = column_string + current_row + \" \"\n except:\n pass\n column_list = column_string.split()\n return column_list",
"def get_rows(self):\n rowlist = []\n if self.direction == \"horizontal\":\n rowlist.append(int(self.position[0][0]))\n return rowlist\n else:\n rowlist.append(int(self.position[0][0]))\n rowlist.append(int(self.position[1][0]))\n if self.size == 3:\n rowlist.append(int(self.position[2][0]))\n return rowlist",
"def rows(self):\n return list(self)",
"def to_list(self):\n return SeriesDefault.register(pandas.Series.to_list)(self)",
"def rows(self) -> List[List]:\n return self._rows",
"def getFloatRow(self, int: int) -> typing.List[float]:\n ...",
"def convert_col_to_list(df, col='Question and Answer'):\n text = df[col].values.tolist()\n return text",
"def get_rows(self) -> List[List[str]]:\n rows = []\n max_col_len = max([len(i) for i in self.param_cols.values()])\n for row_num in range(max_col_len):\n row = []\n first = True\n for parameter in self.parameters:\n try:\n sub_row = self.param_cols[parameter.name][row_num]\n except IndexError:\n sub_row = ['' for _ in range(parameter.num_values + 1)]\n if not first:\n sub_row.insert(0, '')\n row.extend(sub_row)\n first = False\n rows.append(row)\n\n return rows",
"def _format_column(self, row_data):\n return [[row[i] for row in row_data] for i in range(self.row_length)]",
"def pandas_to_rows(df):\n if df is None:\n _logger.debug(\"Returning nothing\")\n return iter([])\n if type(df) is pd.Series:\n df = df.to_frame().T\n if df.empty:\n _logger.warning(\"Pandas DataFrame is empty! Returning nothing!\")\n return iter([])\n _logger.debug(\"Convert DataFrame of shape {} to partition with types:\\n{}\".format(df.shape, df.dtypes))\n records = df.to_records(index=False)\n records = convert_dtypes(records)\n first_row, records = peek(records)\n first_row_info = [\"{} ({}): {}\".format(k, rtype(v), v) for k, v in zip(df.columns, first_row)]\n _logger.debug(\"First record row: {}\".format(first_row_info))\n row = Row(*df.columns)\n return (row(*elems) for elems in records)",
"def pandas_df_to_records(df):\n return df.to_records(index=False).tolist()",
"def series_to_list(series: pd.Series) -> List:\n list_cols = []\n for item in series:\n list_cols.append(item)\n\n return list_cols",
"def row(self, row: int) -> list:\n index_start = row * 9\n return self.grid[index_start:index_start+9]",
"def obj_lister(df):\n obj_list = []\n for col in df.select_dtypes([np.object]):\n obj_list.append(col)\n return obj_list",
"def data(self) -> List[List[Any]]:\n\n column_wise = [column.values for column in self.plaincolumns]\n row_wise = [list(row) for row in zip(*column_wise)]\n\n return row_wise",
"def _get_datapoints(sheet, row, col):\n rowc = _FIELDS['cell_value']['datapoints']['row']\n # Data can start on two different rows. Try first option and then next row.\n if sheet.cell(row + rowc, col).value:\n start_row = row + rowc\n final_row = row + rowc\n else:\n start_row = row + (rowc + 1)\n final_row = row + (rowc + 1)\n point = sheet.cell(final_row, col).value\n while point:\n final_row += 1\n point = sheet.cell(final_row, col).value\n return [sheet.cell(i, col).value for i in range(start_row, final_row)]"
]
| [
"0.6662499",
"0.6641174",
"0.6313694",
"0.6297723",
"0.62273407",
"0.6145791",
"0.60353565",
"0.60344374",
"0.6024161",
"0.60228693",
"0.59896314",
"0.59687847",
"0.59476876",
"0.5942627",
"0.592644",
"0.58992493",
"0.5875752",
"0.58308697",
"0.58127946",
"0.5726091",
"0.5696033",
"0.5691388",
"0.568618",
"0.5637541",
"0.5635463",
"0.56128424",
"0.55935514",
"0.5590829",
"0.5589854",
"0.55735576"
]
| 0.83004636 | 0 |
Inherit the query here to add the woo instance field for group by. | def _query(self, with_clause='', fields={}, groupby='', from_clause=''):
fields['woo_instance_id'] = ", s.woo_instance_id as woo_instance_id"
groupby += ', s.woo_instance_id'
return super(SaleReport, self)._query(with_clause, fields, groupby, from_clause) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def aggregate_query(self):\n raise NotImplementedError",
"def group_by(self, *args):\n for name in args:\n assert name in self._fields or name in self._calculated_fields, \\\n 'Cannot group by `%s` since it is not included in the query' % name\n qs = copy(self)\n qs._grouping_fields = args\n return qs",
"def _aggregation_target(self):\n ...",
"def __init__(self, base_qs, grouping_fields, calculated_fields):\n super(AggregateQuerySet, self).__init__(base_qs._model_cls, base_qs._database)\n assert calculated_fields, 'No calculated fields specified for aggregation'\n self._fields = grouping_fields\n self._grouping_fields = grouping_fields\n self._calculated_fields = calculated_fields\n self._order_by = list(base_qs._order_by)\n self._where_q = base_qs._where_q\n self._prewhere_q = base_qs._prewhere_q\n self._limits = base_qs._limits\n self._distinct = base_qs._distinct",
"def with_totals(self):\n qs = copy(self)\n qs._grouping_with_totals = True\n return qs",
"def fetch_aggregation(self):\n return None",
"def get_queryset(self, request):\r\n queryset = super(MetricGroupAdmin, self).get_queryset(request)\r\n # poor-man's DISTINCT ON for Sqlite3\r\n qs_values = queryset.values('id', 'name')\r\n # 2.7+ only :(\r\n # = {metric['name']: metric['id'] for metric in qs_values}\r\n distinct_names = {}\r\n for metric in qs_values:\r\n distinct_names[metric['name']] = metric['id']\r\n queryset = self.model.objects.filter(id__in=distinct_names.values())\r\n return queryset",
"def queryset(self, request):\n qs = super(AdRepOrderAdmin, self).queryset(request)\n qs = AdRepOrder.objects.select_related().filter(id__in=qs\n ).defer('ad_rep__site__envelope',\n 'ad_rep__site__geom',\n 'ad_rep__site__point')\n return qs",
"def get_results_from_aggregation_sources(self, context):",
"def __init__(self, *args, **kwargs):\n super(DateWindowEOCMeasure, self).__init__(*args, **kwargs)\n self.fields_to_group_by = ['bene_sk']",
"def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.annotate(models.Count('work', distinct=True))\n return qs",
"def queryset(self, request):\n qs = super(AdRepAdmin, self).queryset(request)\n qs = AdRep.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs",
"def _custom_filter(self, query):\r\n return query",
"def __iter__(self):\n grouping = collections.defaultdict(list)\n\n for name, field in self.fields.items():\n group = getattr(field, 'group', '0. ')\n grouping[group].append((field.verbose_name, getattr(\n self.instance, name)))\n\n rexp = re.compile(r\"\\d+. \")\n\n for group, fields in sorted(grouping.items()):\n yield rexp.sub('', group), fields",
"def get_aggregations(self):\n return []",
"def __init__(self, *args, **kwargs):\n super(ProcedureMeasure, self).__init__(*args, **kwargs)\n self.fields_to_group_by = ['bene_sk', 'clm_from_dt']",
"def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.annotate(\n work__count=models.Count('release__works', distinct=True))\n qs = qs.annotate(\n release__count=models.Count('release__id', distinct=True))\n return qs",
"def get_aggregate(self, episode_queryset):\n pass",
"def queryset(self, request):\n qs = super(AdRepLeadAdmin, self).queryset(request)\n qs = AdRepLead.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs",
"def _merge_by_query(self, obj_dict):\n _res = self.__session.query(obj_dict[\"class\"]).filter_by(**obj_dict[\"query_dict\"]).first()\n\n if _res is None:\n self._add(obj_dict[\"instance\"])\n else:\n if hasattr(obj_dict[\"instance\"], 'attributes') and \\\n hasattr(obj_dict[\"instance\"], 'p_key'):\n for attr in obj_dict[\"instance\"].attributes:\n if attr not in obj_dict[\"instance\"].p_key:\n setattr(_res, attr, getattr(obj_dict[\"instance\"], attr))\n # updating the instance\n obj_dict[\"instance\"] = _res\n else:\n raise AttributeError(\"Class variable (attributes / p_key) not set for %s\" %\n (obj_dict[\"instance\"],))",
"def queryset(self, request):\n qs = super(AdRepSiteAdmin, self).queryset(request)\n qs = AdRepSite.objects.select_related().filter(\n id__in=qs).defer('site__envelope', 'site__geom', 'site__point')\n return qs",
"def queryset(self, request):\n qs = super(TwitterAccountAdmin, self).queryset(request)\n qs = TwitterAccount.objects.select_related().filter(id__in=qs\n ).defer('site__envelope', 'site__geom', 'site__point')\n return qs",
"def get_grouped_data(self, field_name):\n pass",
"def _enrich_results(self, record, query):\n record['metadata.query_name'] = query['name']\n record['metadata.query_id'] = '{}_{}'.format(\n query['name'], self.run_tag)\n record['metadata.query_description'] = query['description']\n record['metadata.query_headers'] = query['headers']\n record['@timestamp'] = int(round(time.time() * 1000))\n return record",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def base_queryset(self):\n return self.select_related('product_class')\\\n .prefetch_related('children', 'product_options', 'product_class__options', 'stockrecords', 'images') \\\n .annotate(num_product_class_options=Count('product_class__options'),\n num_product_options=Count('product_options'))",
"def get_queryset(self):\n queryset = super().get_queryset()\n today = datetime.datetime.today()\n return queryset.annotate(\n relevance=models.Case(\n models.When(date__gte=today, then=1),\n models.When(date__lt=today, then=2),\n output_field=models.IntegerField(),\n )).order_by('relevance', 'date')",
"def make_group_by_keyword(self, keyword):\r\n pass",
"def get_summary(self, group_by=None, query=None):\n if query:\n query = self._normalize_query(query)\n else:\n query = {}\n query['deleted'] = False\n group = {'available': '$available'}\n if group_by is not None:\n for g in group_by.split(','):\n if '.' in g:\n parts = g.split('.')\n if parts[0] not in group:\n group[parts[0]] = {}\n group[parts[0]][parts[1]] = '${}.{}'.format(parts[0], parts[1])\n else:\n group[g] = '$'+g\n results = self.instances.aggregate([\n {'$match': query},\n {'$group': {'_id': group, 'count': {'$sum': 1}}},\n ])\n summary = collections.defaultdict(lambda: {'available': 0, 'total': 0})\n def get_key_name(key):\n grouping = key['_id']\n name_arr = []\n for group_name, val in grouping.items():\n if group_name == 'available':\n continue\n if isinstance(val, dict):\n if val:\n for k, v in val.items():\n name_arr.append(\"{}.{}:{}\".format(group_name, k, v))\n else:\n name_arr.append(group_name+':')\n else:\n name_arr.append(group_name+':'+val)\n if name_arr:\n key_name = ','.join(sorted(name_arr))\n return key_name\n else:\n return 'all'\n for result in results:\n key_name = get_key_name(result)\n count = result['count']\n if result['_id']['available']:\n summary[key_name]['available'] += count\n summary[key_name]['total'] += count\n return summary",
"def get_queryset(self, request):\n qs = super().get_queryset(request)\n qs = qs.annotate(models.Count('works'))\n return qs"
]
| [
"0.5847245",
"0.5766902",
"0.57639116",
"0.5744305",
"0.5731105",
"0.5619564",
"0.5573698",
"0.5570105",
"0.54822487",
"0.5438589",
"0.5371942",
"0.534263",
"0.5284601",
"0.5278813",
"0.5276851",
"0.52438235",
"0.51627016",
"0.51558334",
"0.5118398",
"0.5118195",
"0.5111496",
"0.5082594",
"0.50734895",
"0.5061894",
"0.50555044",
"0.50202334",
"0.4985055",
"0.4969577",
"0.4966411",
"0.4954018"
]
| 0.7433047 | 0 |
Run clingo with the provided argument list and return the parsed JSON result. | def solve(*args):
args = ['clingo','--outf=2']+list(args)
print ' '.join(args)
clingo = subprocess.Popen(
' '.join(args),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
out, err = clingo.communicate()
if err:
print err
with open('dump.lp','w') as outfile:
result = json.loads(out)
witness = result['Call'][0]['Witnesses'][0]['Value']
for atom in sorted(witness):
outfile.write(atom +'\n')
return parse_json_result(out) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cc_json():\n return sh(\"intercept-build ./build.py compile:\\\\* -R; ./build.py -c compile:\\\\*\")",
"def xcresulttool_json(*args):\n args = list(args) + ['--format', 'json']\n contents = xcresulttool(*args)\n return json.loads(contents)",
"def xcresulttool_json(*args):\n args = list(args) + ['--format', 'json']\n contents = xcresulttool(*args)\n return json.loads(contents)",
"def run_json(self, cmd):\n\n try:\n loaded = json.loads(self.run(cmd + ' J'))\n except ValueError as decode_error:\n raise StorcliException('Problem processing output: {}'.format(decode_error))\n return loaded",
"def call_py(self, command, *args, **kwargs):\n\tif self.output_format and self.output_format != 'json':\n\t raise RuntimeError, \"output_format must be 'json' for this to work\"\n\treturn simplejson.loads(self.call(command, *args, **kwargs))",
"def cmd_list(args):",
"def api_call():\n\n json_str = load_input()\n output = {\n 'inputs': json_str,\n 'results': 'cool results'}\n\n return json.dumps(output), 200, {'Content-Type': 'text/plain;charset=utf-8'}",
"def Run(self, args):\n p = parent.GetParent(args)\n return requests.List(parent=p, filter=(\n args.state.upper() if args.state else None))",
"def do_list(self, args):\n if args.option == 'config':\n print(list_config())\n if args.option == 'queries':\n for k,v in list_queries().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'jobs':\n update_jobs(CLI_GLOBALS.ENGAGEMENT)\n for k,v in list_jobs().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'results':\n for i in list_results():\n print(i)\n if args.option == 'key':\n for k,v in list_key().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'engagement':\n print(list_engagement())",
"def run_cmd(servable, input):\n\n if not any([servable, input]):\n format_output(HELP_STR)\n return\n\n client = get_dlhub_client()\n\n data = json.loads(input)\n\n res = client.run(servable, data)\n\n format_output(res)\n return res",
"def main():\n # if there are no command line args, run as normal with the input from stdin\n if len(sys.argv) == 1:\n json_string = \"\".join(sys.stdin)\n hierarchical_jsons = [load_json(json_string)]\n # if there are command line args, run using the first argument as a file path to a file with\n # correctly formatted test json values\n else:\n with open(sys.argv[1]) as file:\n hierarchical_jsons = json.load(file)\n\n flattened_jsons = []\n\n # for all the jsons given, in the non-test case there will only be one\n for hierarchical_json in hierarchical_jsons:\n # flatten the json object\n flat_json = flatten_json(hierarchical_json)\n # print the prettied json to stdout\n json_string = json.dumps(flat_json, indent=4)\n flattened_jsons.append(json_string)\n print(json_string)\n\n return flattened_jsons",
"def run_atoml_app():\n data = flask.request.json\n features, output = _get_output(data)\n return_dict = {'input': data, 'features': features, 'output': output}\n return_dict = flask.jsonify(**return_dict)\n\n return return_dict",
"def main(args):\n\n if args['verbose']:\n logging.basicConfig(level=logging.DEBUG)\n else:\n if args['quiet']:\n logging.basicConfig(level=logging.ERROR)\n else:\n logging.basicConfig(level=logging.WARNING)\n\n # unpack args\n\n json_file = args['JSONfile']\n data_dir = args['data_directory']\n temp_file = args['tmp']\n release = args['release']\n\n if json_file:\n json_data = get_json_data(json_file)\n else:\n logging.log(logging.DEBUG, \"Preparing to download JSONfile\")\n if os.path.isfile(temp_file):\n logging.log(logging.WARNING, \"Removing file %s\" % temp_file)\n os.remove(temp_file)\n logging.log(logging.DEBUG, \"Issuing wget for JSON file\")\n args = ['wget', 'https://security-tracker.debian.org/tracker/data/json',\n '-O', temp_file]\n if os.path.isdir('/etc/ssl'):\n if os.path.isdir('/etc/ssl/ca-debian'):\n args.insert(1, '--ca-directory=/etc/ssl/ca-debian')\n call(args)\n logging.log(logging.DEBUG, \"File %s received\" % temp_file)\n json_data = get_json_data(temp_file)\n if os.path.isfile(temp_file):\n logging.log(logging.DEBUG, \"Removing file %s\" % temp_file)\n os.remove(temp_file)\n\n parseJSON(json_data, release)\n parsedirs(data_dir, re.compile('^dsa.+\\.data$'), 2, release)\n parsedirs(data_dir, re.compile('^dla.+\\.data$'), 2, release)\n logging.log(logging.INFO, \"Finished parsing JSON data\")\n printdsas(ovals)",
"def cli(ctx):",
"def cli(ctx):",
"def test_CLI_user_json(self, capsys):\n sys.argv = (self.common_args + [\"-l\", \"Berger_POPC\"]\n + [\"-lt\", str(PATH_ROOT_DATA / \"Berger_POPC.json\")])\n UI.entry_point()\n captured = capsys.readouterr().out\n assert \"Results written to OP_buildH.out\" in captured",
"def gcloud_json(cmd_list):\n if any(['--format' in field for field in cmd_list]):\n raise ValueError('Format must be controlled by this function')\n cmd_list = [c for c in cmd_list] # Copy list to prevent mutation.\n cmd_list.append('--format=json')\n raw_data = subprocess.check_output(cmd_list)\n return json.loads(raw_data)",
"def main():\n parser = argparse.ArgumentParser(description='View custom fields for existing clinical reports.')\n parser.add_argument('c', metavar='clinical_report_id', type=int)\n args = parser.parse_args()\n\n cr_id = args.c\n\n json_response = get_fields_for_cr(cr_id)\n sys.stdout.write(json.dumps(json_response, indent=4))",
"def _run_json(key, process, cmd):\n logging.info('Running %s', ' '.join(cmd))\n raw = subprocess.check_output(cmd)\n logging.info('- returned %d bytes', len(raw))\n return key, process(json.loads(raw))",
"def main_CL():\r\n version=1.0\r\n st = time.time()\r\n parser = OptionParser(usage=usage(), version='%s'%version)\r\n parser.add_option(\"-n\", \"--days\", dest=\"days\", default=\"30\", help=\"Days ago, defaults to 30 days\")\r\n parser.add_option(\"-s\", \"--stream\", dest=\"stream\", default=\"all\", help=\"Code Stream, defaults to all\")\r\n parser.add_option(\"-u\", \"--usage\", dest=\"usage\", default=\"\", help=\"Show usage information\")\r\n parser.add_option(\"-d\", \"--debug\", dest='debug', action=\"count\", help=\"The debug level, use multiple to get more.\")\r\n (options, args) = parser.parse_args()\r\n\r\n if options.debug > 1:\r\n print ' days %s' %(options.days)\r\n print ' args: %s' %args\r\n else:\r\n options.debug = 0\r\n \r\n if options.usage:\r\n print usage()\r\n else:\r\n obj=ListCRs()\r\n obj.setUp()\r\n since = options.days \r\n \r\n #stream = str(stream).strip() \r\n obj.listCRsCL(since, options, st) \r\n \r\n print '\\nTook a total of %3.2f secs -^' %(time.time()-st)",
"def test_execute_job_with_array_input(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"baseCommand\": [\"python3\", \"script.py\"],\n \"inputs\":\n {\n \"test_int_array\": {\"type\": {\"type\": \"array\", \"items\": \"int\"}, \"inputBinding\": {\"position\": 1}},\n \"test_float_array\": {\"type\": {\"type\": \"array\", \"items\": \"float\"}},\n \"test_string_array\": {\"type\": {\"type\": \"array\", \"items\": \"string\"}},\n \"test_reference_array\": {\"type\": {\"type\": \"array\", \"items\": \"File\"}},\n \"test_int_value\": \"int\",\n \"test_float_value\": \"float\",\n \"test_string_value\": \"string\",\n \"test_reference_http_value\": \"File\",\n \"test_reference_file_value\": \"File\",\n \"test_reference_s3_value\": \"File\"\n },\n \"requirements\": {\n CWL_REQUIREMENT_APP_DOCKER: {\n \"dockerPull\": \"python:3.7-alpine\"\n },\n CWL_REQUIREMENT_INIT_WORKDIR: {\n \"listing\": [\n {\n \"entryname\": \"script.py\",\n \"entry\": cleandoc(\"\"\"\n import json\n import os\n input = $(inputs)\n for key, value in input.items():\n if isinstance(value, list):\n if all(isinstance(val, int) for val in value):\n value = map(lambda v: v+1, value)\n elif all(isinstance(val, float) for val in value):\n value = map(lambda v: v+0.5, value)\n elif all(isinstance(val, bool) for val in value):\n value = map(lambda v: not v, value)\n elif all(isinstance(val, str) for val in value):\n value = map(lambda v: v.upper(), value)\n elif all(isinstance(val, dict) for val in value):\n def tmp(value):\n path_ = value.get('path')\n if path_ and os.path.exists(path_):\n with open (path_, 'r') as file_:\n file_data = file_.read()\n return file_data.upper()\n value = map(tmp, value)\n input[key] = \";\".join(map(str, value))\n elif isinstance(value, dict):\n path_ = value.get('path')\n if path_ and os.path.exists(path_):\n with open (path_, 'r') as file_:\n file_data = file_.read()\n input[key] = file_data.upper()\n elif isinstance(value, str):\n input[key] = value.upper()\n elif isinstance(value, bool):\n input[key] = not value\n elif isinstance(value, int):\n input[key] = value+1\n elif isinstance(value, float):\n input[key] = value+0.5\n json.dump(input, open(\"./tmp.txt\",\"w\"))\n \"\"\")\n }\n ]\n }\n },\n \"outputs\": [{\"id\": \"output_test\", \"type\": \"File\", \"outputBinding\": {\"glob\": \"tmp.txt\"}}],\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n try:\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n except colander.Invalid:\n self.fail(\"Test\")\n\n assert desc is not None\n\n test_bucket_ref = mocked_aws_s3_bucket_test_file(\n \"wps-process-test-bucket\",\n \"input_file_s3.txt\",\n \"This is a generated file for s3 test\"\n )\n\n test_http_ref = mocked_reference_test_file(\n \"input_file_http.txt\",\n \"http\",\n \"This is a generated file for http test\"\n )\n\n test_file_ref = mocked_reference_test_file(\n \"input_file_ref.txt\",\n \"file\",\n \"This is a generated file for file test\"\n )\n\n exec_body = {\n \"mode\": EXECUTE_MODE_ASYNC,\n \"response\": EXECUTE_RESPONSE_DOCUMENT,\n \"inputs\":\n [\n {\"id\": \"test_int_array\", \"value\": [10, 20, 30, 40, 50]},\n {\"id\": \"test_float_array\", \"value\": [10.03, 20.03, 30.03, 40.03, 50.03]},\n {\"id\": \"test_string_array\", \"value\": [\"this\", 
\"is\", \"a\", \"test\"]},\n {\"id\": \"test_reference_array\",\n \"value\": [\n {\"href\": test_file_ref},\n {\"href\": test_http_ref},\n {\"href\": test_bucket_ref}\n ]\n },\n {\"id\": \"test_int_value\", \"value\": 2923},\n {\"id\": \"test_float_value\", \"value\": 389.73},\n {\"id\": \"test_string_value\", \"value\": \"string_test\"},\n {\"id\": \"test_reference_http_value\", \"href\": test_http_ref},\n {\"id\": \"test_reference_file_value\", \"href\": test_file_ref},\n {\"id\": \"test_reference_s3_value\", \"href\": test_bucket_ref}\n ],\n \"outputs\": [\n {\"id\": \"output_test\", \"type\": \"File\"},\n ]\n }\n\n with contextlib.ExitStack() as stack_exec:\n for mock_exec in mocked_execute_process():\n stack_exec.enter_context(mock_exec)\n proc_url = \"/processes/{}/jobs\".format(self._testMethodName)\n resp = mocked_sub_requests(self.app, \"post_json\", proc_url, timeout=5,\n data=exec_body, headers=self.json_headers, only_local=True)\n assert resp.status_code in [200, 201], \"Failed with: [{}]\\nReason:\\n{}\".format(resp.status_code, resp.json)\n status_url = resp.json.get(\"location\")\n\n results = self.monitor_job(status_url)\n\n job_output_path = results.get(\"output_test\")[\"href\"].split(self.settings[\"weaver.wps_output_path\"])[-1]\n tmp_file = \"{}/{}\".format(self.settings[\"weaver.wps_output_dir\"], job_output_path)\n\n try:\n processed_values = json.load(open(tmp_file, \"r\"))\n except FileNotFoundError:\n self.fail(\"Output file [{}] was not found where it was expected to resume test\".format(tmp_file))\n except Exception as exception:\n self.fail(\"An error occurred during the reading of the file: {}\".format(exception))\n assert processed_values[\"test_int_array\"] == \"11;21;31;41;51\"\n assert processed_values[\"test_float_array\"] == \"10.53;20.53;30.53;40.53;50.53\"\n assert processed_values[\"test_string_array\"] == \"THIS;IS;A;TEST\"\n assert processed_values[\"test_reference_array\"] == (\"THIS IS A GENERATED FILE FOR FILE TEST;\"\n \"THIS IS A GENERATED FILE FOR HTTP TEST;\"\n \"THIS IS A GENERATED FILE FOR S3 TEST\")\n assert processed_values[\"test_int_value\"] == 2924\n assert processed_values[\"test_float_value\"] == 390.23\n assert processed_values[\"test_string_value\"] == \"STRING_TEST\"\n assert processed_values[\"test_reference_s3_value\"] == \"THIS IS A GENERATED FILE FOR S3 TEST\"\n assert processed_values[\"test_reference_http_value\"] == \"THIS IS A GENERATED FILE FOR HTTP TEST\"\n assert processed_values[\"test_reference_file_value\"] == \"THIS IS A GENERATED FILE FOR FILE TEST\"",
"def _run_query(args):\r\n\tdata = _unicode_urlencode(args)\r\n\tsock = urllib.request.urlopen(api_url + '?' + data)\r\n\tresult = sock.read()\r\n\tif result.startswith(codecs.BOM_UTF8):\r\n\t\tresult = result.lstrip(codecs.BOM_UTF8).decode('utf-8')\r\n\telif result.startswith(codecs.BOM_UTF16_LE):\r\n\t\tresult = result.lstrip(codecs.BOM_UTF16_LE).decode('utf-16-le')\r\n\telif result.startswith(codecs.BOM_UTF16_BE):\r\n\t\tresult = result.lstrip(codecs.BOM_UTF16_BE).decode('utf-16-be')\r\n\treturn json.loads(result)",
"def main():\n # There are no args, but parse them just so help works\n args = docopt(__doc__)\n print(process_files_json(), end=\"\")\n return None",
"def main():\n pods = openshift_object.get_running_pods()\n me = openshift_object.get_self()\n routes = openshift_object.get_routes()\n nodes = openshift_object.get_nodes()\n pvc = openshift_object.get_pvcs()\n pv = openshift_object.get_pv()\n project = openshift_object.get_projects()\n return jsonify({\n \"pods\": pods,\n \"me\": me,\n \"routes\": routes, \n \"nodes\":nodes,\n \"pvcs\":pvc,\n \"pv\":pv,\n \"projects\":project})",
"def get(self, args):\n\t\tif len(args) >= 2:\n\t\t\tif args[1] == \"list\":\n\t\t\t\tself.write_line(\"LIST {0}\".format(self.config[\"daemon\"][\"rootdir\"] + \"/package-index.json\"))",
"def main():\n local = salt.client.LocalClient()\n\n if len(sys.argv) == 2 and sys.argv[1] == '--list':\n print json.dumps(local.cmd('*', 'grains.items'), indent=4, sort_keys=True)\n elif len(sys.argv) == 3 and sys.argv[1] == '--host':\n print json.dumps(local.cmd(sys.argv[2], 'grains.items'), indent=4, sort_keys=True)\n else:\n print \"Need an argument, either --list or --host <host>\"",
"def main_list(args):\n return list_commands(args.directory)",
"def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get",
"def main():\n\n dotfiles_location = os.path.expanduser('~') + '/dotfiles'\n default_bundles_location = dotfiles_location + '/bundles/dein/repos'\n\n parser = argparse.ArgumentParser(\n description='Build a cache of available colours schemes, output them into a JSON file for vim plugins to read'\n )\n\n parser.add_argument('-n', '--dryrun',\n action='store_true',\n dest='dryrun',\n required=False,\n help='Dryrun mode (do not write to cache file)'\n )\n\n parser.add_argument('-v', '--verbose',\n action='store_true',\n dest='verbose',\n required=False,\n help='Increase verbosity'\n )\n\n parser.add_argument('-c', '--cache',\n action='store',\n dest='cache_file',\n required=False,\n metavar='CACHE_FILE',\n default=dotfiles_location + '/colos.json',\n help='Cache file name'\n )\n\n parser.add_argument(\n action='store',\n dest='command', # maybe use an action to validate the commands with their args?\n nargs=1,\n help='Command to issue: generate, whitelist <name>, blacklist <name>, toggle-variant <name>'\n )\n\n parser.add_argument(\n dest='args',\n nargs='*'\n )\n\n try:\n args = parser.parse_args()\n except ValueError as e:\n print('Invalid options.', e, file=sys.stderr)\n sys.exit(1)\n\n\n data = cololib.cmd_load_data(args.cache_file, verbose=args.verbose)\n\n if 'build_cache' == args.command[0]:\n cololib.cmd_build_cache(\n data=data,\n search_path=default_bundles_location,\n verbose=args.verbose, dryrun=args.dryrun\n )\n elif 'get_random' == args.command[0]:\n if args.args:\n tag = args.args[0]\n else:\n tag = None\n cololib.cmd_get_random_colo(data=data, tag=tag, verbose=args.verbose)\n else:\n print('Unknown command \"%s\"'%(args.command[0]), file=sys.stderr)\n sys.exit(1)",
"def run_ntcontribs(args):\n nt_map(args)"
]
| [
"0.5743908",
"0.57331854",
"0.57331854",
"0.5550003",
"0.55271095",
"0.52811164",
"0.5247224",
"0.52448386",
"0.52060694",
"0.5136273",
"0.5135224",
"0.5096528",
"0.5070134",
"0.50677496",
"0.50677496",
"0.5049834",
"0.5035326",
"0.5027086",
"0.50106984",
"0.500295",
"0.49970853",
"0.49902397",
"0.49612674",
"0.4958691",
"0.49477974",
"0.49466482",
"0.49444675",
"0.49438405",
"0.49350226",
"0.49292767"
]
| 0.6133359 | 0 |
Like solve() but uses a random sign heuristic with a random seed. | def solve_randomly(*args):
args = list(args[0]) + ["--sign-def=3","--seed="+str(random.randint(0,1<<30))]
return solve(*args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve(self):",
"def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)",
"def initLocalBestChoice(self):\n random.seed()\n return",
"def solve(self, state, times):",
"def solve(self):\n pass",
"def solve(self):\n pass",
"def solve(self):\n ...",
"def randomSolution(self):\n # seed the random number generator\n random.seed()\n # loop through all the features\n for feature in self.features:\n # pick a random number based on the size of the feature's domain\n domainIndex = random.randint(0, len(feature.domain) - 1)\n # assign the value from the domain\n feature.value = feature.domain[domainIndex]",
"def _solve(self, mu=None):\n pass",
"def check_seed(self, seed):\n out = self.complement(seed)\n return self.solver.solve([(i + 1) for i in seed] + [-(i + 1) for i in out])",
"def solve(self, solver):\n solver.solve()",
"def solve(self):\n \n raise NotImplementedError(\"not implemented!\")",
"def bad_seed(\n partner1_salary_compound_rate: float,\n partner1_salary_plateau: float,\n partner2_salary_compound_rate: float,\n partner2_salary_plateau: float,\n initial_year: int,\n increase_savings_weight: float,\n initial_tfsa_guess: float,\n final_tfsa_guess: float,\n initial_equalize_income_weighting_guess: float,\n final_equalize_income_weighting_guess: float,\n partner1_year_of_retirement: int,\n partner2_year_of_retirement: int,\n final_year: int,\n rrsp_adjustment_guess: float,\n rrsp_interest_rate: float,\n tfsa_interest_rate: float,\n optimize: solve.Optimizing_Solver,\n):\n\n initial_tfsa_func = optimize.subscribe_optimized_scalar(\n \"initial_tfsa\", 0, 1, initial_tfsa_guess\n )\n final_tfsa_func = optimize.subscribe_optimized_scalar(\n \"final_tfsa\", 0, 1, final_tfsa_guess\n )\n initial_equalize_income_weighting_func = optimize.subscribe_optimized_scalar(\n \"initial_equalize_income_weighting\",\n 0,\n 1,\n initial_equalize_income_weighting_guess,\n )\n final_equalize_income_weighting_func = optimize.subscribe_optimized_scalar(\n \"final_equalize_income_weighting\", 0, 1, final_equalize_income_weighting_guess\n )\n rrsp_adjustment_func = optimize.subscribe_optimized_scalar(\n \"rrsp_adjustment\", -1, 1, rrsp_adjustment_guess\n )\n\n return _bad_seed_raw(\n partner1_salary_compound_rate,\n partner1_salary_plateau,\n partner2_salary_compound_rate,\n partner2_salary_plateau,\n initial_year,\n increase_savings_weight,\n initial_tfsa_func,\n final_tfsa_func,\n initial_equalize_income_weighting_func,\n final_equalize_income_weighting_func,\n partner1_year_of_retirement,\n partner2_year_of_retirement,\n final_year,\n rrsp_adjustment_func,\n rrsp_interest_rate,\n tfsa_interest_rate,\n )",
"def generate_random_solution(self):\n # \"Generate random solution\"\n Individual.COUNT += 1\n if INITIALPOP == 'random':\n # Choose randomly a file in the original dataset.\n seed = random.choice(starting_seeds)\n Individual.SEEDS.add(seed)\n elif INITIALPOP == 'seeded':\n # Choose sequentially the inputs from the seed list.\n # NOTE: number of seeds should be no less than the initial population\n assert (len(starting_seeds) == POPSIZE)\n seed = starting_seeds[Individual.COUNT - 1]\n Individual.SEEDS.add(seed)\n\n digit1 = generate_digit(seed)\n digit1.is_original = True\n individual = Individual(digit1, seed)\n individual.seed = seed\n\n return individual",
"def solve(self):\n raise NotImplementedError(\"This method needs to be implemented.\")",
"def test_solver(allowed_symbols, len_sequence=3):\n secret_sequence = \"\"\n for _ in range(len_sequence):\n secret_sequence += allowed_symbols[random.randint(0, len_sequence - 1)]\n print('secret:', secret_sequence)\n\n solution = brute_force_solver(allowed_symbols, secret_sequence)\n return solution == tuple(secret_sequence)",
"def test_exact_supercontrolled_decompose_phase_0_use_random(self, seed):\n state = np.random.default_rng(seed)\n decomposer = self.make_random_supercontrolled_decomposer(state)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(0, 0, 0) @ tgt_k2\n self.check_exact_decomposition(tgt_unitary, decomposer, num_basis_uses=0)",
"def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)",
"def solve(self, **kwargs):\n return self.system.solve(**kwargs)",
"def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)",
"def anneal():\n best_sol = list(range(SIZE))\n best_sum = get_sum(best_sol)\n shuffle(best_sol)\n\n temp = 10000000\n cool_rate = 0.0003\n\n counter = 0\n while temp > 1:\n new_sol = best_sol.copy()\n i, j = randint(0, SIZE - 1), randint(0, SIZE - 1)\n new_sol[i], new_sol[j] = new_sol[j], new_sol[i]\n new_energy = get_sum(new_sol)\n cur_energy = best_sum\n if calculate_probability(cur_energy, new_energy, temp) > random():\n best_sol = new_sol.copy()\n best_sum = new_energy\n temp *= 1 - cool_rate\n counter += 1\n\n print(counter)\n\n print(best_sol)\n print(best_sum)\n return best_sol, best_sum",
"def maximize_seed(self, seed, direction):\n while True:\n comp = self.complement(seed)\n x = self.solver.new_var() + 1\n if direction:\n # search for a solution w/ all of the current seed plus at\n # least one from the current complement.\n self.solver.add_clause([-x] + [i + 1 for i in comp]) # temporary clause\n # activate the temporary clause and all seed clauses\n havenew = self.solver.solve([x] + [i + 1 for i in seed])\n else:\n # search for a solution w/ none of current complement and at\n # least one from the current seed removed.\n self.solver.add_clause([-x] + [-(i + 1) for i in seed]) # temporary clause\n # activate the temporary clause and deactivate complement clauses\n havenew = self.solver.solve([x] + [-(i + 1) for i in comp])\n self.solver.add_clause([-x]) # remove the temporary clause\n\n if havenew:\n seed = self.get_seed()\n else:\n return seed",
"def mbed_solve (A, budgets, S, verbose=True):\n # print(S)\n start_time = time.time()\n x_v, C = initialize(A, S)\n if (verbose):\n print(\"Initialized\")\n print(\"V1: \", np.sum(x_v == 1), \" ,V2: \", np.sum(x_v == -1))\n results_info, S_new, Ad, edges_removed = random_choose_candidate_solve (x_v, C, A, S, budgets, start_time, verbose=verbose)\n return results_info, S_new, Ad, edges_removed",
"def eg_ok(n=1):\n\n random.seed(n)",
"def test_approx_supercontrolled_decompose_phase_0_use_random(self, seed, delta=0.01):\n state = np.random.default_rng(seed)\n basis_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n basis_phase = state.random() * 2 * np.pi\n basis_b = 0.4 # how to safely randomize?\n basis_unitary = np.exp(1j * basis_phase) * basis_k1 @ Ud(np.pi / 4, basis_b, 0) @ basis_k2\n decomposer = TwoQubitBasisDecomposer(UnitaryGate(basis_unitary), basis_fidelity=0.99)\n\n tgt_k1 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_k2 = np.kron(random_unitary(2, seed=state).data, random_unitary(2, seed=state).data)\n tgt_phase = state.random() * 2 * np.pi\n d1, d2, d3 = state.random(size=3) * delta\n tgt_unitary = np.exp(1j * tgt_phase) * tgt_k1 @ Ud(d1, d2, d3) @ tgt_k2\n self.check_approx_decomposition(tgt_unitary, decomposer, num_basis_uses=0)",
"def mutation(self):\n\n index = random.randint(0, len(self.solution_vector) - 1)\n self.solution_vector[index] = 1 - self.solution_vector[index]",
"def test_solve(test, test_targ, result):\n assert str(calc.solve(test, test_targ)) == result",
"def solution(self) -> State:",
"def solve(self):\n\n # sanity check\n if self.G is None or self.curve is None or self.Q is None:\n print(\"Can't solve not all parameters are set\")\n return False # unsuccessful\n\n self.count = 1 # initial count\n self.start = time.time()\n\n order = self.curve.order(self.G) # get order of generator\n\n ############ POLLARD'S RHO + FLOYD'S EXTENSION ############\n found = False\n\n # will probably find a factor, so need to loop with random numbers until we find it\n while not found:\n\n ############ GENERATE RANDOM FUNCTION POINTS ############\n points = [] # list of points to inform our random function\n\n for _ in range(17):\n a = secrets.randbelow(order)\n b = secrets.randbelow(order)\n P = (self.G * a) + (self.Q * b) # linear combination\n points.append([P, a, b]) # add to list\n\n ############ RANDOM START POINTS ############\n Y, aY, bY = X, aX, bX = points.pop() # random starting points\n\n ############ FLOYD'S CYCLE DETECTION ############\n while not found:\n X, aX, bX = g((X, aX, bX), order, points) # first runner\n Y, aY, bY = g(g((Y,aY,bY),order,points),order,points) # second runner\n\n found = X == Y # detect match\n self.count += 1 # increment count\n\n if bX == bY and aX == aY: # if we arrive at identical combinations\n found = False # reset and try again\n else:\n inv = modInverse((bX - bY) % order, order) # get mod inverse\n\n if inv == 0: # if no mod inverse exists\n found = False # need to randomly try again\n else: # we have found k\n self.k = ((aY - aX) * inv) % order # so set it\n\n if self.G * self.k != self.Q: # not always 100% going to work\n print(\"error\")\n found = True\n\n self.time = time.time() - self.start\n\n # set space\n self.space = 18\n\n if self.verbose:\n print(\"k:\", self.k)\n print(\"Time taken: %.3f s\" % (self.time)) # print time taken\n print(\"Space used: %d\" % (self.space)) # print space used\n print(\"Numbers checked:\", self.count) # print total count\n\n return True",
"def solve(self, network):\n # Convert to a network if it is not.\n if not isinstance(network, NeuralNetwork):\n network = NeuralNetwork(network)\n \n steps, _, _ = self._loop(network, max_steps=100000)\n if steps < 100000:\n print((\"Failed 100k test with %d\" % steps))\n return 0\n successes = 0\n points = np.array([-0.9, -0.5, 0.0, 0.5, 0.9])\n # The 1.35 and 0.15 were taken from the neat-python implementation.\n for x in points * self.h:\n for theta in points * self.r:\n for dx in points * 1.35:\n for dtheta0 in points * 0.15:\n state = (x, dx, np.array([theta, 0.0]), np.array([dtheta0, 0.0]))\n steps, states, _ = self._loop(network, initial=state, max_steps=1000)\n if steps >= 1000:\n successes += 1\n # return random.random() < 0.5\n return int(successes > 100)"
]
| [
"0.5972131",
"0.595319",
"0.5898223",
"0.58782655",
"0.5782235",
"0.5782235",
"0.5760689",
"0.5755695",
"0.57238746",
"0.56515795",
"0.5626522",
"0.5608089",
"0.55490136",
"0.54761404",
"0.5436409",
"0.5363292",
"0.53548735",
"0.5340229",
"0.5321458",
"0.52873623",
"0.5283367",
"0.5282389",
"0.5276354",
"0.5276154",
"0.5274954",
"0.5262153",
"0.52468073",
"0.5245439",
"0.52408063",
"0.52355033"
]
| 0.8397691 | 0 |
Parse the provided JSON text and extract a dict representing the predicates described in the first solver result. | def parse_json_result(out):
result = json.loads(out)
assert len(result['Call']) > 0
assert len(result['Call'][0]['Witnesses']) > 0
witness = result['Call'][0]['Witnesses'][0]['Value']
class identitydefaultdict(collections.defaultdict):
def __missing__(self, key):
return key
preds = collections.defaultdict(list)
env = identitydefaultdict()
for atom in witness:
parsed,dummy = parse_terms(atom)
preds[parsed[0]['predicate']].append(parsed)
return preds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reach_process_text():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n text = body.get('text')\n rp = reach.process_text(text)\n if rp and rp.statements:\n stmts = stmts_to_json(rp.statements)\n res = {'statements': stmts}\n return res\n else:\n res = {'statements': []}\n return res",
"def reach_process_json():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n json_str = body.get('json')\n rp = reach.process_json_str(json_str)\n if rp and rp.statements:\n stmts = stmts_to_json(rp.statements)\n res = {'statements': stmts}\n return res\n else:\n res = {'statements': []}\n return res",
"def trips_process_text():\n response = request.body.read().decode('utf-8')\n body = json.loads(response)\n text = body.get('text')\n tp = trips.process_text(text)\n if tp and tp.statements:\n stmts = stmts_to_json(tp.statements)\n res = {'statements': stmts}\n return res\n else:\n res = {'statements': []}\n return res",
"def _parse_questions(self, text, params):\n\n try:\n data = json.loads(text)\n except ValueError as e:\n Utils.log(traceback.format_exc())\n raise Exception('Could not get content')\n\n output = {}\n output['count'] = data['count']\n output['pages'] = data['pages']\n output['page'] = params['page'] if 'page' in params else 1\n output['questions'] = []\n for q in data['questions']:\n output['questions'].append(self.convert_question(q))\n\n return output",
"def processRequest(data):\n text, key, lang = data[0], data[1], data[2]\n prob = profanityCheck(text)\n if prob >= 0.7:\n return {text: prob}\n\n print(\"Analysing text %s, of the language %s\" % (text, lang))\n return {text: makePerspectiveRequest(text, key, lang)}",
"def predict_json(self, inputs: JsonDict) -> JsonDict:\n sent_tokens = self._tokenizer.tokenize(inputs[\"sentence\"])\n\n # Find all verbs in the input sentence\n pred_ids = [\n i\n for (i, t) in enumerate(sent_tokens)\n if t.pos_ == \"VERB\" or (self._language.startswith(\"en_\") and t.pos_ == \"AUX\")\n ]\n\n # Create instances\n instances = [\n self._json_to_instance({\"sentence\": sent_tokens, \"predicate_index\": pred_id})\n for pred_id in pred_ids\n ]\n\n # Run model\n outputs = [\n [sanitize_label(label) for label in self._model.forward_on_instance(instance)[\"tags\"]]\n for instance in instances\n ]\n\n # Consolidate predictions\n pred_dict = consolidate_predictions(outputs, sent_tokens)\n\n # Build and return output dictionary\n results = {\"verbs\": [], \"words\": sent_tokens}\n\n for tags in pred_dict.values():\n # Join multi-word predicates\n tags = join_mwp(tags)\n\n # Create description text\n description = make_oie_string(sent_tokens, tags)\n\n # Add a predicate prediction to the return dictionary.\n results[\"verbs\"].append(\n {\n \"verb\": get_predicate_text(sent_tokens, tags),\n \"description\": description,\n \"tags\": tags,\n }\n )\n\n return sanitize(results)",
"def _parse_question_json(self, text, params={}):\n\n try:\n data = json.loads(text)\n except ValueError as e:\n Utils.log(traceback.format_exc())\n raise Exception('Could not get content')\n\n output = self.convert_question(data)\n output['body'] = self.markdown(output['body'])\n\n return output",
"def get_p(input_file,output_file):\n result = []\n with codecs.open(input_file, 'r', 'utf-8') as fr:\n for line in fr:\n try:\n dic = json.loads(line.strip())\n # print(dic)\n except:\n continue\n spo_list = dic['spo_list']\n # print(spo_list)\n p_list = [item['predicate'] for item in spo_list]\n for p in p_list:\n result.append([json.dumps(dic),p])\n # print(result)\n\n with codecs.open(output_file, 'w', 'utf-8') as wd:\n for line in result:\n wd.writelines('\\t'.join(line) + '\\n')",
"def data_from_string(text):\n return json_load(text.replace(']],\\n', ']], '))",
"def formula_search_to_dict(raw_result):\n return [\n {\n \"id\": item.cluster_id,\n \"text\": item.text,\n \"n_entries\": item.n_entries,\n \"n_texts\": item.unique_text,\n \"verb_text\": item.verb_text,\n }\n for item in raw_result\n ]",
"def convert(self, text):\n\n obj = {}\n test = {}\n try:\n # Examine the python code to ensure it it is safe to evaluate.\n # We will evaluate each statement and see if it is safe.\n # None and numbers are ignored unless it is in the `arg` dictionary.\n # All the top level variables are either string, boolean, list of strings, or dictionary.\n code = ast.parse(text)\n for snippet in code.body:\n if ast_class(snippet) == 'Assign' and len(snippet.targets) == 1:\n target = snippet.targets[0]\n if ast_class(target) == 'Name':\n name = target.id\n class_name = ast_class(snippet.value)\n if name in self.string_keys and class_name == 'Str':\n obj[name] = compile_expr(snippet.value)\n elif name in self.bool_keys and class_name == 'Name' and snippet.value.id in ('True', 'False'):\n obj[name] = compile_expr(snippet.value)\n elif name == 'scope_filter' and class_name == 'List':\n if all(ast_class(l) == 'Str' for l in snippet.value.elts):\n obj[name] = compile_expr(snippet.value)\n elif name == 'args' and class_name == 'Dict':\n if self.eval_dict(snippet.value):\n obj[name] = compile_expr(snippet.value)\n elif name == 'test' and class_name == 'Dict':\n if self.eval_dict(snippet.value):\n test = compile_expr(snippet.value)\n except Exception as e:\n error('Could not read rule settings!\\n\\n%s' % str(e))\n return None, None\n return obj, test",
"def parse(res):\n res=res[0]\n steps = []\n if not isinstance(res, dict):\n return {}\n\n for step in res[\"legs\"][0][\"steps\"]:\n instruction = re.sub('<[^<]+?>', '', step[\"html_instructions\"])\n distance = step[\"distance\"][\"text\"]\n duration = step[\"duration\"][\"text\"]\n\n if step[\"travel_mode\"] == \"TRANSIT\":\n departure_stop = step[\"transit_details\"][\"departure_stop\"][\"name\"]\n arrival_stop = step[\"transit_details\"][\"arrival_stop\"][\"name\"]\n departure_time = step[\"transit_details\"][\"departure_time\"][\"text\"]\n arrival_time = step[\"transit_details\"][\"arrival_time\"][\"text\"]\n num_stops = step[\"transit_details\"][\"num_stops\"]\n bus_name = step[\"transit_details\"][\"headsign\"]\n\n steps.append({\n \"distance\": distance,\n \"duration\": duration,\n \"instruction\": instruction,\n \"bus_name\": bus_name,\n \"num_stops\": num_stops,\n \"arrival_time\": arrival_time,\n \"departure_time\": departure_time,\n \"departure_stop\": departure_stop,\n \"arrival_stop\": arrival_stop,\n \"travel_mode\": \"TRANSIT\"\n })\n else:\n substeps = []\n if \"steps\" in step:\n for step2 in step[\"steps\"]:\n instruction2 = re.sub('<[^<]+?>', '', step2[\"html_instructions\"])\n distance2 = step2[\"distance\"][\"text\"]\n duration2 = step2[\"duration\"][\"text\"]\n\n substeps.append({\n \"distance\": distance2,\n \"duration\": duration2,\n \"instruction\": instruction2\n })\n steps.append({\n \"distance\": distance,\n \"duration\": duration,\n \"instruction\": instruction,\n \"substeps\": substeps,\n \"travel_mode\": step[\"travel_mode\"]\n })\n\n return {\n \"arrival_time\": res[\"legs\"][0].get(\"arrival_time\", {}).get(\"text\", None),\n \"departure_time\": res[\"legs\"][0].get(\"departure_time\", {}).get(\"text\", None),\n \"end_address\": res[\"legs\"][0][\"end_address\"],\n \"start_address\": res[\"legs\"][0][\"start_address\"],\n \"distance\": res[\"legs\"][0][\"distance\"][\"text\"],\n \"duration\": res[\"legs\"][0][\"duration\"][\"text\"],\n \"steps\": steps,\n }",
"def parse(file_text, predicates):\n\n # skip lines with user-defined predicates\n lines = file_text.split('\\n')\n new_text=\"\"\n for line in lines:\n if not(len(line)!=0 and line[0]=='#'):\n new_text+=line\n\n formula=[]\n element=\"\"\n left=0 # number of parentheses\n right=0\n for c in new_text:\n # parentheses\n if c=='(' or c==')':\n if element!=\"\":\n formula.append(element)\n element=\"\"\n formula.append(c)\n if c=='(':\n left+=1\n if c==')':\n right+=1\n \n # skip spaces\n elif c.isspace() and element!=\"\":\n formula.append(element)\n element=\"\"\n \n # load whole element\n elif not c.isspace():\n element+=c\n\n if left!=right:\n raise SyntaxError(\"Invalid form of input formula (parentheses not matching).\")\n \n\n #create_tree(formula, predicates)\n a = create_automaton(formula, predicates) \n edit_transitions(a)\n return a",
"def parse(self, *_args, **_kwargs):\n cli_output = self._task_args.get(\"text\")\n res = self._check_reqs()\n if res.get(\"errors\"):\n return {\"errors\": res.get(\"errors\")}\n\n template_path = self._task_args.get(\"parser\").get(\"template_path\")\n if template_path and not os.path.isfile(template_path):\n return {\n \"error\": \"error while reading template_path file {file}\".format(\n file=template_path\n )\n }\n try:\n template = open(self._task_args.get(\"parser\").get(\"template_path\"))\n except IOError as exc:\n return {\"error\": to_native(exc)}\n\n re_table = textfsm.TextFSM(template)\n fsm_results = re_table.ParseText(cli_output)\n\n results = list()\n for item in fsm_results:\n results.append(dict(zip(re_table.header, item)))\n\n return {\"parsed\": results}",
"def parse_mapzen_response(txt):\n\tdictionary = {}\n\tdata = json.loads(txt)\n\tif data['features']:\n\t\tdictionary['status'] = 'OK'\n\t\tcurrent = data['features'][0]\n\t\tprops = current['properties']\n\t\tdictionary['confidence'] = props['confidence']\n\t\tdictionary['label'] = props['label']\n\n\t\tcoordinates = current['geometry']['coordinates']\n\t\tdictionary['longitude'] = coordinates[0]\n\t\tdictionary['latitude'] = coordinates[1]\n\telse:\n\t\tdictionary['status'] = None\n\n\treturn dictionary",
"def parse_text(self, text: str) -> SectionDict:",
"def parse(self, text):\n\n goal = NLUGoal()\n goal.text = str(text)\n self._nlu_client.send_goal_and_wait(goal)\n result = self._nlu_client.get_result()\n\n #no intent found, return None \n if result.intentName == \"\":\n return None, None, None\n else:\n #parse\n slot_info = json.loads(result.slot_json_string)\n return result.intentName, result.probability, slot_info",
"def createFromJson(jsonStr: str) -> Dict[str, Any]:\n jham = json.loads(jsonStr)\n clearCache(jham)\n\n # Transform the control operators\n for key in jham[\"control\"]:\n ctrls = jham[\"control\"][key]\n # Modify the matrices\n if isinstance(ctrls[\"matrices\"], list):\n mats = []\n for mat in ctrls[\"matrices\"]:\n mats.append(dictMatrixToNumpyMatrix(mat, complex))\n ctrls[\"matrices\"] = mats\n else:\n ctrls[\"matrices\"] = dictMatrixToNumpyMatrix(ctrls[\"matrices\"], complex)\n\n # Transform the drift operators\n for key in jham[\"drift\"]:\n drifts = jham[\"drift\"][key]\n # Modify the matrices\n if isinstance(drifts[\"matrices\"], list):\n mats = []\n for mat in drifts[\"matrices\"]:\n mats.append(dictMatrixToNumpyMatrix(mat, complex))\n drifts[\"matrices\"] = mats\n else:\n drifts[\"matrices\"] = dictMatrixToNumpyMatrix(drifts[\"matrices\"], complex)\n\n return jham",
"def vsepr_parse_user_answer(user_input):\r\n return json.loads(user_input)",
"def parse_lti_2_0_result_json(self, json_str):\r\n try:\r\n json_obj = json.loads(json_str)\r\n except (ValueError, TypeError):\r\n msg = \"Supplied JSON string in request body could not be decoded: {}\".format(json_str)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # the standard supports a list of objects, who knows why. It must contain at least 1 element, and the\r\n # first element must be a dict\r\n if type(json_obj) != dict:\r\n if type(json_obj) == list and len(json_obj) >= 1 and type(json_obj[0]) == dict:\r\n json_obj = json_obj[0]\r\n else:\r\n msg = (\"Supplied JSON string is a list that does not contain an object as the first element. {}\"\r\n .format(json_str))\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # '@type' must be \"Result\"\r\n result_type = json_obj.get(\"@type\")\r\n if result_type != \"Result\":\r\n msg = \"JSON object does not contain correct @type attribute (should be 'Result', is {})\".format(result_type)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # '@context' must be present as a key\r\n REQUIRED_KEYS = [\"@context\"] # pylint: disable=invalid-name\r\n for key in REQUIRED_KEYS:\r\n if key not in json_obj:\r\n msg = \"JSON object does not contain required key {}\".format(key)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # 'resultScore' is not present. If this was a PUT this means it's actually a DELETE according\r\n # to the LTI spec. We will indicate this by returning None as score, \"\" as comment.\r\n # The actual delete will be handled by the caller\r\n if \"resultScore\" not in json_obj:\r\n return None, json_obj.get('comment', \"\")\r\n\r\n # if present, 'resultScore' must be a number between 0 and 1 inclusive\r\n try:\r\n score = float(json_obj.get('resultScore', \"unconvertable\")) # Check if float is present and the right type\r\n if not 0 <= score <= 1:\r\n msg = 'score value outside the permitted range of 0-1.'\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n except (TypeError, ValueError) as err:\r\n msg = \"Could not convert resultScore to float: {}\".format(err.message)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n return score, json_obj.get('comment', \"\")",
"def test_parse():\n first = parse_formula(\"PO4H2(CH2)12CH3\")\n assert first == {\"P\":1, \"O\":4, \"H\":29, \"C\":13}\n\n second = parse_formula(\"H2O\")\n assert second == {\"H\":2, \"O\":1}",
"def basic(text, filters=None):\n ## format text from user input to something queriable\n text = re.sub('[^a-zA-Z\\d\\s]', '', text).replace(\" \", \"+\")\n\n ## construct EFO query url\n url = 'http://www.ebi.ac.uk/spot/zooma/v2/api/services/annotate?propertyValue='\n url = url + text\n if not filters is None:\n url = url + \"&filter=required:[\"+\",\".join(filters)+\"]\"\n\n ## run query, if zooma failure, then return nothing\n try:\n responses = requests.get(url).json()\n except:\n return dict()\n\n ## parse xml for results\n natural = [response['annotatedProperty']['propertyValue'] for response in responses]\n efo_urls = [response['semanticTags'] for response in responses]\n efo_urls = unlist(efo_urls)\n efo_pattern = re.compile('EFO_[0-9]+')\n efo_terms = [efo_pattern.findall(url) for url in efo_urls] \n efo_terms = unlist(efo_terms)\n return dict(zip(efo_terms, natural))",
"def process_data(self, json_dict: dict):\n all_token_ids = []\n all_level_ids = []\n all_synset_ids = []\n all_lemma_ids = []\n all_is_highway = []\n all_targets = []\n\n def tokenize(lemma_):\n return self.tokenizer(\n lemma_,\n add_special_tokens=False,\n truncation=True,\n is_split_into_words=True,\n return_token_type_ids=False,\n ).input_ids\n\n def add_lemma(lemma_, abs_level_, synset_id_, is_highway_):\n lemma_token_ids = tokenize([lemma_])\n n_tokens_ = len(lemma_token_ids)\n token_ids.extend(lemma_token_ids)\n level_ids.extend([self.level_to_id[abs_level_]] * n_tokens_)\n synset_ids.extend([synset_id_] * n_tokens_)\n lemma_ids.extend([lemma_ids[-1] + 1] * n_tokens_)\n is_highway.extend([is_highway_] * n_tokens_)\n\n # Go through all JSON entries\n for synset in tqdm(json_dict.values()):\n token_ids = []\n level_ids = []\n synset_ids = [0]\n lemma_ids = [0]\n is_highway = []\n\n lemmas = [l.replace(\"_\", \" \") for l in synset[\"lemmas\"]]\n abs_level = (\"current\", \"current\")\n\n # Save all lemmas of the current node\n synset_token_ids = self.tokenizer.batch_encode_plus(lemmas,\n add_special_tokens=False,\n return_token_type_ids=False).input_ids\n all_targets.append(synset_token_ids)\n\n for level in (\"hypernyms\", \"hyponyms\"):\n for sub_synset in synset[level].values():\n if \"lemmas\" in sub_synset:\n lemmas = [l.replace(\"_\", \" \") for l in sub_synset[\"lemmas\"]]\n abs_level = (level, \"current\")\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n for sub_level in (\"hypernyms\", \"hyponyms\"):\n for sub_sub_lemmas in sub_synset[sub_level].values():\n lemmas = [l.replace(\"_\", \" \") for l in sub_sub_lemmas]\n abs_level = (level, sub_level)\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n # Append the global lists\n all_token_ids.append(token_ids)\n all_level_ids.append(level_ids)\n all_synset_ids.append(synset_ids[1:])\n all_lemma_ids.append(lemma_ids[1:])\n all_is_highway.append(is_highway)\n\n data = (\n all_token_ids,\n all_level_ids,\n all_synset_ids,\n all_lemma_ids,\n all_is_highway,\n all_targets\n )\n\n return data",
"def getConcept(result_json):\n answers = {}\n answers[0] = result_json['outputs'][0][\"data\"]['concepts'][0]['name']\n answers[1] = result_json['outputs'][0][\"data\"]['concepts'][1]['name']\n answers[2] = result_json['outputs'][0][\"data\"]['concepts'][2]['name']\n answers[3] = result_json['outputs'][0][\"data\"]['concepts'][3]['name']\n answers[4] = result_json['outputs'][0][\"data\"]['concepts'][4]['name']\n return answers",
"def test_json_syntax(self):\n\n base = abs_path('./specs/')\n ps = Parser(base+'script3-1.py', base)\n\n # empty - should parse\n spec = {}\n read_wrapper(spec, ps)\n\n # empty array - should parse\n spec = {'constraints': []}\n read_wrapper(spec, ps)\n\n # empty element - should fail\n spec = {'constraints': [{}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching block - should fail\n spec = {'constraints': [{'block': 'a'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # no matching variable - should fail\n spec = {'constraints': [{'variable': 'c'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner option - should fail\n spec = {'constraints': [{'option': 'a1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # loner block - should parse\n spec = {'constraints': [{'block': 'A', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # block and option - should parse\n spec = {'constraints': [{'block': 'A', 'option': 'a1', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variable and option - should parse\n spec = {'constraints': [{'variable': 'a', 'option': '2.5', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # weird option - should parse\n # fixme: {'option': '[1,2]'} will fail\n spec = {'constraints': [{'variable': 'c', 'option': '[1, 2]', 'condition': 'B==b1'}]}\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H==b1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)\n\n # variables in condition do not match - should fail\n spec = {'constraints': [{'block': 'A', 'condition': 'H.index==1'}]}\n with self.assertRaises(ParseError):\n read_wrapper(spec, ps)",
"def parse_survey_details():\n json_data = open('/Users/williamliu/GitHub/surveys/get_survey_details.json')\n loaded_data = json.load(json_data)\n\n print loaded_data['data']['pages'][1]['questions'][1]['heading']\n # I am preoccupied with more than one person I help",
"def get_text_prediction():\n json = request.get_json()\n print(json)\n if len(json['text']) == 0:\n return jsonify({'error': 'invalid input'})\n\n return jsonify({'you sent this': json['text']})",
"def __call__(self, json_res):\r\n id2hyps = {\r\n res['clip_id']: [_remove_nonascii(res['descs'][0]['desc'].strip())]\r\n for res in json_res\r\n }\r\n id2hyps = self.tokenizer.tokenize(id2hyps)\r\n assert len(id2hyps) == len(self.id2refs)\r\n\r\n ret_scores = {}\r\n for scorer, method in self.scorers:\r\n print(f\"Computing {method} score...\")\r\n score, scores = scorer.compute_score(self.id2refs, id2hyps)\r\n if isinstance(method, list):\r\n for sc, scs, m in zip(score, scores, method):\r\n ret_scores[m] = sc * 100\r\n else:\r\n ret_scores[method] = score * 100\r\n\r\n return ret_scores",
"def find_json(data):\n if data.startswith(\"------------------------------ \\n\"\n \"QUERY PLAN DESCRIPTION: \\n\"\n \"------------------------------\"):\n # Vertica-like\n data = data.split(\"JSON format:\\n\")[1].split(\"End JSON format\")[0]\n return data",
"def extract_spacy(self, text: str)->dict:\n ners=None\n try:\n persons=[]\n locations=[]\n orgs=[]\n misc=[]\n docs=[]\n if len(text)>1000000:\n docs=self._splitCount(text,1000000)\n else:\n docs.append(text)\n for doc in docs:\n doc_spacy = self.recognizer(doc)\n for token in doc_spacy:\n if token.ent_type_ == \"PER\":\n persons.append(token.text)\n if token.ent_type_ == \"LOC\":\n locations.append(token.text)\n if token.ent_type_ == \"ORG\":\n orgs.append(token.text)\n if token.ent_type_ == \"MISC\":\n misc.append(token.text)\n ners={\"persons\":list(set(persons)), \"locations\":list(set(locations)),\"orgs\":list(set(orgs)), \"misc\":list(set(misc))}\n except Exception as ex:\n print('Exception while extracting NERs')\n print(str(ex))\n finally:\n return ners"
]
| [
"0.5605068",
"0.5600367",
"0.54364705",
"0.52712244",
"0.5269811",
"0.52465916",
"0.51889664",
"0.5111044",
"0.5047918",
"0.50293493",
"0.49568862",
"0.4936398",
"0.49341658",
"0.49337515",
"0.49258518",
"0.4897983",
"0.48910117",
"0.48890945",
"0.48828137",
"0.48752654",
"0.48591948",
"0.4841173",
"0.4838427",
"0.48356748",
"0.4824536",
"0.48214784",
"0.47952318",
"0.47840837",
"0.4761539",
"0.47615063"
]
| 0.63283634 | 0 |
Returns a tuple of (constants, functions, properties). constants: a sorted list of (name, features) tuples, including all constants except for SCLEX_ constants, which are presumed not used by scripts. The SCI_ constants for functions are omitted, since they can be derived, but the SCI_ constants for properties are included since they cannot be derived from the property names. functions: a sorted list of (name, features) tuples, for the features that should be exposed to script as functions. This includes all 'fun' functions; it is up to the program to decide if a given function cannot be scripted. It is also up to the caller to export the SCI_ constants for functions. properties: a sorted list of (name, property), where property is a | def GetScriptableInterface(f):
constants = [] # returned as a sorted list
functions = {} # returned as a sorted list of items
properties = {} # returned as a sorted list of items
for name in f.order:
features = f.features[name]
if features["Category"] != "Deprecated":
if features["FeatureType"] == "val":
constants.append( (name, features) )
elif features["FeatureType"] in ["fun","get","set"]:
if features["FeatureType"] == "get":
propname = name.replace("Get", "", 1)
properties[propname] = (name, properties.get(propname,(None,None))[1])
elif features["FeatureType"] == "set":
propname = name.replace("Set", "", 1)
properties[propname] = (properties.get(propname,(None,None))[0], name)
else:
functions[name] = features
propertiesCopy = properties.copy()
for propname, (getterName, setterName) in propertiesCopy.items():
getter = getterName and f.features[getterName]
setter = setterName and f.features[setterName]
getterValue, getterIndex, getterIndexName, getterType = 0, None, None, None
setterValue, setterIndex, setterIndexName, setterType = 0, None, None, None
propType, propIndex, propIndexName = None, None, None
isok = (getterName or setterName) and not (getter is setter)
if isok and getter:
if getter['Param2Type'] == 'stringresult':
getterType = getter['Param2Type']
else:
getterType = getter['ReturnType']
getterType = ConvertEnu(getterType)
getterValue = getter['Value']
getterIndex = getter['Param1Type'] or 'void'
getterIndexName = getter['Param1Name']
isok = ((getter['Param2Type'] or 'void') == 'void') or (getterType == 'stringresult')
if isok and setter:
setterValue = setter['Value']
setterType = ConvertEnu(setter['Param1Type']) or 'void'
setterIndex = 'void'
if (setter['Param2Type'] or 'void') != 'void':
setterIndex = setterType
setterIndexName = setter['Param1Name']
setterType = ConvertEnu(setter['Param2Type'])
isok = (setter['ReturnType'] == 'void') or (setter['ReturnType'] == 'int' and setterType=='string')
if isok and getter and setter:
isok = ((getterType == setterType) or (getterType == 'stringresult' and setterType == 'string')) and (getterIndex == setterIndex)
propType = getterType or setterType
propIndex = getterIndex or setterIndex
propIndexName = getterIndexName or setterIndexName
if isok:
# do the types appear to be useable? THIS IS OVERRIDDEN BELOW
isok = (propType in ('int', 'position', 'line', 'pointer', 'colour', 'colouralpha', 'bool', 'string', 'stringresult')
and propIndex in ('void','int','position','line','string','bool'))
# getters on string properties follow a different protocol with this signature
# for a string getter and setter:
# get int funcname(void,stringresult)
# set void funcname(void,string)
#
# For an indexed string getter and setter, the indexer goes in
# wparam and must not be called 'int length', since 'int length'
# has special meaning.
# A bool indexer has a special meaning. It means "if the script
# assigns the language's nil value to the property, call the
# setter with args (0,0); otherwise call it with (1, value)."
#
# Although there are no getters indexed by bool, I suggest the
# following protocol: If getter(1,0) returns 0, return nil to
# the script. Otherwise return getter(0,0).
if isok:
properties[propname] = {
"GetterValue" : getterValue,
"SetterValue" : setterValue,
"PropertyType" : propType,
"IndexParamType" : propIndex,
"IndexParamName" : propIndexName,
# The rest of this metadata is added to help generate documentation
"Category" : (getter or setter)["Category"],
"GetterName" : getterName,
"SetterName" : setterName,
"GetterComment" : CommentString(getter),
"SetterComment" : CommentString(setter)
}
#~ print(properties[propname])
# If it is exposed as a property, the constant name is not picked up implicitly
# (because the name is different) but its constant should still be exposed.
if getter:
constants.append( ("SCI_" + getterName.upper(), getter))
if setter:
constants.append( ("SCI_" + setterName.upper(), setter))
else:
# Cannot parse as scriptable property (e.g. not symmetrical), so export as functions
del(properties[propname])
if getter:
functions[getterName] = getter
if setter:
functions[setterName] = setter
funclist = list(functions.items())
funclist.sort()
proplist = list(properties.items())
proplist.sort()
constants.sort()
return (constants, funclist, proplist) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_functions():\n\treturn [f for f in globals() if f.startswith('make_')]",
"def get_constants(self):\n return self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12",
"def list_step_functions() -> List[str]:\n return list(STEP_SCORES_MAP.keys())",
"def get_defined_constants():\n raise NotImplementedError()",
"def get_constants_list(self):\n return [self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12]",
"def get_func_tuples():\n func_tuples = [\n ('met_gumeJ1_3sopt_tr20', 'Rel-UME J1', 'C1-.'),\n ('met_gumeJ5_3sopt_tr20', 'Rel-UME J5', 'r-^'),\n ('met_gfssdJ1_3sopt_tr20', 'Rel-FSSD J1', 'C4--'),\n ('met_gfssdJ5_3sopt_tr20', 'Rel-FSSD J5', 'b-x'),\n\n ('met_gmmd_med', 'Rel-MMD', 'k-.'),\n ('met_gmmd_med_bounliphone', 'Rel-MMD medboun', 'k-'),\n\n ('met_gfssdJ1_3sopt_tr50', 'FSSD-opt3 J1', 'b-^'),\n ('met_gfssdJ5_3sopt_tr50', 'FSSD-opt3 J5', 'b-.h'),\n\n ('met_gumeJ1_2V_rand', 'UME-rand J1', 'r--^'),\n ('met_gumeJ1_1V_rand', 'UME-rand J1 1V', 'y-'),\n ('met_gumeJ2_2V_rand', 'UME-rand J2', 'g--^'),\n ('met_gumeJ3_2V_rand', 'UME-rand J3', 'b--^'),\n ('met_gumeJ5_2V_rand', 'UME-rand J5', 'k--^'),\n\n ('met_gumeJ1_2sopt_tr20', 'Rel-UME-opt2 J1', 'C2-.'),\n ('met_gumeJ5_2sopt_tr20', 'Rel-UME-opt2 J5', 'g-'),\n ('met_gumeJ1_2sopt_tr50', 'Rel-UME-opt2 J1', 'r-.h'),\n\n ('met_gumeJ1_3sopt_tr50', 'UME-opt3 J1', 'r-'),\n ('met_gumeJ5_3sopt_tr50', 'UME-opt3 J5', 'k-'),\n\n\n ]\n return func_tuples",
"def _build_functions_list():\n return {\"ec2-sg\": _build_ec2_mapping_from_sg,\n \"ec2-resources\": _build_ec2_mapping_from_resources,\n \"rds-sg\": _build_rds_mapping_from_sg,\n \"rds-resources\": _build_rds_mapping_from_resources,\n \"elbv2-sg\": _build_elbv2_mapping_from_sg,\n \"elbv2-resources\": _build_elbv2_mapping_from_resources}",
"def extract_constants(func):\n const_dict = {}\n params = len(func.params)\n new_func, consts = ExtractConstants().extract_constants(func)\n for i, const in enumerate(consts):\n const_dict[params + i] = const\n\n new_func = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(new_func))[\"main\"]\n return new_func, const_dict",
"def gen_funcs_with_const(self):\n func_with_const = '\\n (func (export \"{lane_type}.{op}_with_const_{cnt}\") (result v128) ({lane_type}.{op} {param_1} {param_2}))'\n func_with_param_and_const = '\\n (func (export \"{lane_type}.{op}_with_const_{cnt}\") (param v128) (result v128) ({lane_type}.{op} (local.get 0) {param_1}))'\n funcs = ''\n cnt = 0\n for op in self.BINARY_OPS:\n for param_1, param_2 in self.get_test_data_with_const:\n funcs += func_with_const.format(lane_type=self.LANE_TYPE,\n op=op,\n param_1=SIMD.v128_const(param_1, self.LANE_TYPE),\n param_2=SIMD.v128_const(param_2, self.LANE_TYPE),\n cnt=cnt)\n cnt += 1\n\n for op in self.BINARY_OPS:\n for param_1, param_2 in self.get_test_data_with_const:\n funcs += func_with_param_and_const.format(lane_type=self.LANE_TYPE,\n op=op,\n param_1=SIMD.v128_const(param_1, self.LANE_TYPE),\n cnt=cnt)\n cnt += 1\n\n return funcs",
"def _get_functions():\n\n # Get all functions that start with _office.\n fcts = {fct_name[len(FCT_PREFIX):]: fct for (fct_name, fct) in\n globals().iteritems() if fct_name.startswith(FCT_PREFIX) and\n hasattr(fct, \"__call__\")}\n\n return fcts",
"def get_constants(self):\n temp = self._properties.get('constants', [])\n return temp",
"def get_funcs(self,var):\n fname = (var+\".p\") \n pickle_path = os.path.join(CWD_PATH,self.join_path,self.pick_path,fname)\n [coef,powers,intercept,mins,maxes] = pickle.load(open(pickle_path,'rb'))\n \n # The 3 function variables you need to-recreate this model & the min & max to set this in the environment.\n out = {'coef': coef, 'powers':powers,'intercept':intercept}\n return out, mins, maxes",
"def extract_constants_and_predicates(planning_problem: PlanningProblem) -> Tuple[List[Expr],\n List[Tuple[Expr, int]],\n Dict[str, Expr]]:\n seen_predicates = set()\n seen_constants = set()\n constants_per_predicate = collections.defaultdict(list)\n\n initial_predicates = planning_problem.initial\n # Make all predicates positive so we can extract the name via predicate.op\n goal_predicates = list(map(make_positive, planning_problem.goals))\n precondition_predicates = list(map(make_positive, [p for a in planning_problem.actions for p in a.precond]))\n postcondition_predicates = list(map(make_positive, [e for a in planning_problem.actions for e in a.effect]))\n\n all_predicates = initial_predicates + goal_predicates + precondition_predicates + postcondition_predicates\n\n for predicate in all_predicates:\n if predicate.op not in seen_predicates and not is_variable(predicate.op):\n seen_predicates.add((predicate.op, len(predicate.args)))\n for arg in predicate.args:\n if arg not in seen_constants and not is_variable(arg):\n seen_constants.add(arg)\n constants_per_predicate[predicate.op].append(arg)\n\n return list(seen_constants), list(seen_predicates), constants_per_predicate",
"def gen_funcs_combination(self):\n funcs = '\\n\\n;; Combination'\n funcs += '\\n(module'\n\n assert_template = ' (func (export \"{lane_type}.{op1}-{lane_type}.{op2}\") (param v128 v128 v128) (result v128) ' \\\n '({lane_type}.{op1} ({lane_type}.{op2} (local.get 0) (local.get 1))(local.get 2))' \\\n ')'\n\n binary_ops = list(self.BINARY_OPS)\n binary_ops.reverse()\n for op1 in self.BINARY_OPS:\n for op2 in binary_ops:\n funcs += '\\n' + assert_template.format(lane_type=self.LANE_TYPE, op1=op1, op2=op2)\n\n funcs += '\\n)'\n return funcs",
"def get_sim_funs():\n # Get all the functions\n functions = [affine,\n hamming_dist, hamming_sim,\n lev_dist, lev_sim,\n jaro,\n jaro_winkler,\n needleman_wunsch,\n smith_waterman,\n overlap_coeff, jaccard, dice,\n monge_elkan, cosine,\n exact_match, rel_diff, abs_norm]\n # Return a dictionary with the functions names as the key and the actual\n # functions as values.\n return dict(zip(sim_function_names, functions))",
"def listFeatures() :\n global features\n features = [feature.split(\".\")[0] for feature in os.listdir(os.path.abspath(__file__)[:-11])\n if feature.endswith(\".py\") and feature != \"__init__.py\"]",
"def get_functions(self):\n\n functions = []\n for scenario in self.scenarios:\n functions.extend(scenario.functions)\n\n return functions",
"def get_rdkit_descriptor_functions():\n ret = [\n (name, f)\n for name, f in inspect.getmembers(Descriptors)\n if inspect.isfunction(f) and not name.startswith(\"_\")\n ]\n # some which are not in the official Descriptors module we need to add manually\n ret.extend([(\"FormalCharge\", Chem.GetFormalCharge), (\"SSSR\", Chem.GetSSSR)])\n ret.sort()\n return ret",
"def funcs_in_script(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n N = len(lines)\n funcs = []\n for n in range(N):\n line = lines[n]\n\n ###################################################\n # RETRIEVE FUNCTION NAME #\n ###################################################\n if not line[:4] == 'def ':\n continue\n if not '(' in line:\n continue\n end = line.index('(')\n name = line[4:end]\n\n ###################################################\n # RETRIEVE DOCSTRING HEADER #\n ###################################################\n header = ''\n for m in range(n, N - 1):\n line = lines[m]\n\n # this should not happen (when coded in python syntax, a closing\n # parenthesis must appear first)\n if m > n and line[:4] == 'def ':\n break\n\n # this marks the end of the function definition\n if '):' in line:\n hline = lines[m + 1] # potential docstring header line\n # if it exists, then here\n\n\n # remove leading white spaces:\n while hline[0] == ' ':\n hline = hline[1:]\n\n # check whether it is in fact (the start of) a docstring\n if hline[:3] not in ['\"\"\"', \"'''\"]:\n break\n\n # take the first line of this docstring\n header = hline[3:-1]\n\n # remove docstring closing:\n if header[-3:] in ['\"\"\"', \"'''\"]:\n header = header[:-3]\n\n # ignore outdated functions if labelled as such:\n if header.lower()[:10] == '[outdated]':\n name = None\n if header.lower()[:1] == '*':\n name = None\n break\n\n if name is None:\n continue\n\n funcs.append([name, header])\n\n return funcs",
"def setup(self):\n declared = []\n for obj in Rt.objective:\n var_list = split(\"[+*/-]\", obj)\n for v in var_list:\n if v not in declared:\n self.add_input(v)\n declared.append(v)\n self.add_output(\"Objective function \" + obj)",
"def help() : \n\n import types\n\n globs = globals()\n for key, val in globs.iteritems() :\n if isinstance(val, types.FunctionType ) :\n print key\n #for obj in globals() :\n # print obj\n # print obj.callable()\n print globals()['main'].__doc__",
"def workflowLessTypes(self):\n\n tools = [c.getName() for c in\n self.atgenerator.getGeneratedTools(self.package)\n if not\n utils.isTGVFalse(c.getTaggedValue('autoinstall'))]\n tools.sort()\n return tools",
"def allFunctions(self):\n\t\tmodulos=sublime.decode_value(open(RutasPython.funciones()).read())\n\t\tlista=[]\n\t\tfor modulo in modulos:\n\t\t\tlista+=[ (funcion+\"\\t•\"+modulo, self.ponerCursor(modulo+\".\"+funcion)) for funcion in modulos[modulo]]\n\t\treturn sorted(lista)",
"def functions(self):\n return [v for v in self.globals.values()\n if isinstance(v, values.Function)]",
"def available_functions(self):\n return self.config.keys()",
"def _minimal() -> list:\n\n return ['r2', 'mape', 'nrmse', 'corr_coeff', 'rmse', 'mae', 'mse', 'mpe', \n 'mase', 'r2_score']",
"def getdefflags(config_nm):\n if config_nm is 'train':\n user_params = user_params_train\n elif config_nm is 'eval':\n user_params = user_params_eval\n elif config_nm is 'tfrecorder':\n user_params = user_params_recorder\n else:\n print('Unrecognized configuration name : %s, exiting ....' % config_nm)\n exit(-1)\n\n\n return mandatory_params+user_params",
"def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:\n return (Feature.CHORES, Feature.PROJECTS)",
"def getGroupFuncs(self):\n\n funcs = []\n for p in self.Parameters:\n if p.arg_name[0:8] == \"Function\" and p.arg_value:\n fct, attr = p.arg_value.split(':')\n if fct and attr:\n funcs.append((fct, attr))\n if not funcs:\n funcs.append(('count', '*'))\n return funcs",
"def test_Protein_2_FunctionEnum_and_Score_sparse_vs_flatfile_ex_2():\n UniProtID_2_test = conftest.get_random_human_ENSP(num_ENSPs=1, UniProt_ID=True)[0]\n funcEnum_list_from_sparse, score_list_from_sparse = get_funcEnum_and_score_from_sparse_matrix(UniProtID_2_test, ENSP_2_rowIndex_dict, CSC_ENSPencoding_2_FuncEnum)\n try:\n funcEnum_arr, score_arr = ENSP_2_tuple_funcEnum_score_dict[UniProtID_2_test]\n except KeyError:\n funcEnum_arr, score_arr = [], []\n assert list(funcEnum_arr) == funcEnum_list_from_sparse\n assert list(score_arr) == score_list_from_sparse"
]
| [
"0.5691015",
"0.56161505",
"0.5587567",
"0.55644184",
"0.5510985",
"0.54930717",
"0.5430147",
"0.5411343",
"0.5374079",
"0.53456837",
"0.52303857",
"0.5223214",
"0.51945597",
"0.5171993",
"0.51279044",
"0.5125285",
"0.51191556",
"0.5116045",
"0.5112311",
"0.5022907",
"0.49247202",
"0.4924454",
"0.49156034",
"0.49117383",
"0.4895784",
"0.48749956",
"0.48683032",
"0.48640656",
"0.4854471",
"0.4853742"
]
| 0.6213655 | 0 |
Shows the given view controller. | def showViewController(viewController):
__PyContentViewController__.shared.setRootViewController(viewController) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def debug_view(self):\n\n self.view.show()",
"def show(self):\n self.Show()",
"def show(self):\n self.wid.show()",
"def show(self):\n self.scene().show()",
"def Show(self):\r\n return Control.Show(self)",
"def show(self):\n self._impl.show()",
"def show(self):\n self._window.show()",
"def show(self) -> None:\n show(self._layout)",
"def openController(self, name, parent):\n frame = ICS[name](parent)\n frame.Show()\n return frame",
"def show(self):\n self.window.run_command(\"show_panel\", {\"panel\": self.full_name})",
"def show_window(self):\n self.show()",
"def _showView(self, win, fn=None):\n raise RuntimeError('Not implemented')",
"def show(self):\r\n\t\tself.frame.Show(True)",
"def show(self, window):\r\n\r\n return",
"def display(self, *args, **kwargs):\n return self.show(*args, **kwargs)",
"def show(self):\r\n self.wf.Show()",
"def ShowMe(self, event):\n self.Show(True)",
"def show():\n from siding.addons import ui\n ui.show()",
"def show(*args, **kwargs):\n from . import core\n\n return core.show(*args, **kwargs)",
"def __call__(self):\n self.show()",
"def on_show_view(self):\n arcade.set_background_color(arcade.color.DARK_BLUE_GRAY)\n\n # Enable the UIManager when the view is showm.\n self.manager.enable()",
"def show(self, parent=None):\n # Some Gui's don't like to process all events from a single \n # call to process events (Qt), and pumping the loop is not\n # reliable. Instead, we just schedule the call to set_visible \n # to occur after we start the event loop and with a priority \n # that is less than any relayouts the may be triggered by \n # pending events. This means that the layout queue should \n # finish processing, and then the window will be shown.\n self._prep_window()\n app = self.toolkit.app\n app.schedule(self.set_visible, (True,), priority=75)\n app.start_event_loop()",
"def showFrontView(self):\r\n if(self.dataController.fileLoaded == True):\r\n self.dataController.showFrontView()",
"def show(self,window):\n self.showFunctions(window)",
"def show(self):\n\n pass",
"def showframe(self, frame):\n self.frames[frame].show()",
"def show(self) -> None:\n\t\tself.setIcon(self._icon)\n\t\tself.setText(self._subtitle)\n\t\tself.setInformativeText(self._message)\n\t\tself.setWindowTitle(self._title)\n\t\tself.exec()",
"def show(self):\n self.frame.grid()\n self.visible = True",
"def show(self):\n\n self.image.show()",
"def show(cls, context: DataContext, project: ResearchProject, parent):\n dialog = cls(context, project, parent)\n dialog.exec_()"
]
| [
"0.6725987",
"0.6593572",
"0.6456488",
"0.63563937",
"0.6289247",
"0.62467057",
"0.62319267",
"0.62244475",
"0.6197007",
"0.6121775",
"0.609827",
"0.5939371",
"0.59245074",
"0.59083045",
"0.589036",
"0.5832835",
"0.5802848",
"0.5787498",
"0.56053865",
"0.5597037",
"0.55964655",
"0.55805486",
"0.5544781",
"0.55384403",
"0.5529184",
"0.55186564",
"0.5515552",
"0.55082214",
"0.5491308",
"0.54868877"
]
| 0.7933866 | 0 |
Search a list of (header, value) tuples for a debug header. If found, return the index. Otherwise, return 1. | def find_debug_header(self, header_list):
matchers = {
"debug_uri": ("location", False),
"debug_token": (self.debug_header.lower(), True)
}
for i, htup in enumerate(header_list):
header, val = htup
for attr in matchers:
h, ret = matchers[attr]
if header.lower() == h:
setattr(self, attr, val)
if ret:
return i
else:
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sample_idx(sample, header):\n\n for item in header:\n if sample in item:\n return header.index(item)\n\n print(sample + \" not found in header, check input files.\")\n sys.exit()",
"def index_from_headers(self, item):\n assert item in set(self.headers), f\"String of {item} passed but this is not in headers!\\n{self.headers}\"\n return self.headers.index(item)",
"def FindHeaderLength():\n\n lookup = 'Lateral um'\n \n with open(filename) as myFile:\n for FoundPosition, line in enumerate(myFile, 1):\n if lookup in line:\n print 'Scan Data found at line:', FoundPosition\n break\n \n return FoundPosition+4",
"def find_offset(self,value):\n return self.header.find_offset(value)",
"def find(self, list, key, value):\n for i, dic in enumerate(list):\n if dic[key] == value:\n return i\n return -1",
"def _search(listing, absolute_idx):\n if not listing:\n return 0\n if len(listing) == 1:\n return 0 if absolute_idx <= listing[0] else 1\n\n for idx, line_break_idx in enumerate(listing):\n if line_break_idx >= absolute_idx:\n return idx",
"def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))",
"def getIndex(self,filt):\n indx = [i for i in xrange(len(self._header)) if filt == self._header[i]]\n return indx",
"def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None",
"def find_nested_index(listing, nested_location, value_to_find):\n for index, item in enumerate(listing):\n if item[nested_location] == value_to_find:\n return index\n raise IndexError",
"def find_header_values(line, nums_dict):\n try:\n for num_key, pattern in HEADER_PAT_DICT.items():\n if nums_dict[num_key] is None:\n pattern_match = pattern.match(line)\n if pattern_match:\n # regex is 1-based\n nums_dict[num_key] = int(pattern_match.group(1))\n return\n except (ValueError, KeyError) as e:\n raise InvalidDataError(\"While reading a data file, encountered error '{}' on line: {}\".format(e, line))",
"def findIndex(lst, key, value):\r\n\r\n for i, dic in enumerate(lst):\r\n if dic['properties'][key] == value:\r\n return i\r\n return -1",
"def check_header(self, magmap):\n for i in range(len(self.mag_map_list)):\n if magmap.fits_header['DATE-OBS'] == self.mag_map_list[i].fits_header['DATE-OBS']:\n return i",
"def index_equals_value_search1(arr):\n for key, value in enumerate(arr):\n if value == key:\n return value\n return -1",
"def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None",
"def find_index(row):\n value = row[index]\n if value in seen:\n return seen[value]\n for row_ in merged.iter_dicts(True):\n if row_[index] == value:\n seen[value] = row_[\"index\"]\n return row_[\"index\"]\n return None",
"def find(lst, key, value):\n\n for i, dic in enumerate(lst):\n if dic[key] == value:\n return i\n return None",
"def debug_info_header(header):\n print(colored(\"Header:\", 'cyan'), colored(\"Valid FDT magic value found\", \"green\", attrs=['bold']))\n print(colored(\"Header\", 'cyan'), \"-> Total Size of file: \",\n colored('{0:>8d} {0:>#8x}'.format(header.totalsize), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Struct Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_struct), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_struct), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to String Block: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_dt_strings), 'yellow'), \" with size: \",\n colored('{0:>8d} {0:>#8x}'.format(header.size_dt_strings), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Offset to Memory Reser: \",\n colored('{0:>8d} {0:>#8x}'.format(header.off_mem_rsvmap), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Version of DTB: \",\n colored('{0:>8d} {0:>#8x}'.format(header.version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Previous Version of DTB:\",\n colored('{0:>8d} {0:>#8x}'.format(header.last_comp_version), 'yellow'))\n print(colored(\"Header\", 'cyan'), \"-> Boot CPU Number: \",\n colored('{0:>8d} {0:>#8x}'.format(header.boot_cpuid_phys), 'yellow'))\n print()",
"def get_header_value(field_name, header):\n\n # print 'field_name [%s] header [%s]' % (field_name, header)\n\n # Make sure we are only looking at the header,\n # even if the caller passes us the entire message.\n #\n pieces = header.split('\\r\\n\\r\\n', 1)\n header = pieces[0]\n\n match = re.search('^%s\\s*:\\s*([^\\s]+)\\s*$' % field_name, header,\n re.MULTILINE | re.IGNORECASE)\n\n if match:\n return match.group(1).strip()\n else:\n return '-1'",
"def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"",
"def _findPosition(self, key):\n for i in range(len(self._entryList)):\n if self._entryList[i].key == key:\n return i\n return None",
"def dls_header_search(self, buf, f_name):\r\n self.file_size = len(buf)\r\n self.my_dls = []\r\n\r\n raw_file = buf\r\n dls_count = 0\r\n start_offset = 0\r\n end_offset = 0\r\n\r\n while end_offset != self.file_size:\r\n try:\r\n start_offset = end_offset\r\n page_len = struct.unpack(\"<I\", raw_file[start_offset + 8:start_offset + 12])[0]\r\n end_offset = start_offset + page_len\r\n\r\n if raw_file[start_offset:start_offset + 4] == b'1SLD' or raw_file[start_offset:start_offset + 4] == b'2SLD':\r\n self.my_dls.append({'Start Offset': start_offset, 'End Offset': end_offset})\r\n dls_count += 1\r\n else:\r\n self.logfile.write(\"%s: Error in length of page when finding page headers.\" % (f_name))\r\n break\r\n except:\r\n self.logfile.write(\"%s: Error in length of page when finding page headers.\" % (f_name))\r\n break\r\n\r\n if dls_count == 0:\r\n # Return false to caller so that the next file will be searched\r\n return False\r\n else:\r\n # Return true so that the DLSs found can be parsed\r\n return True",
"def find_indeces(self, header):\n indeces = {'T': None, 'WV': None, 'WK': None, 'BZ': None, 'SPR': None,\n 'WBER': None, 'ABG.': None, 'UNG.': None, 'SPOE': None,\n 'FPOE': None, 'OEVP': None, 'GRUE': None, 'NEOS': None,\n 'WWW': None, 'ANDAS': None, 'GFW': None, 'SLP': None,\n 'WIFF': None, 'M': None, 'FREIE': None}\n for index, item in enumerate(header):\n indeces[item] = index\n return indeces",
"def test_get_indices_one_existing_item_list(self):\r\n item_to_find = ['PC.355']\r\n self.assertEqual(_get_indices(self.dist_matrix_header, item_to_find),\r\n [1])",
"def search_list(search):\n fun_list = basic_list_exception.make_list()\n for x in range(len(fun_list)):\n try:\n location = fun_list.index(search)\n return location\n except ValueError:\n return -1",
"def list_find(f, items):\n for i, x in enumerate(items):\n if f(x):\n return i\n return None",
"def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)",
"def index(self, value):\n self.__validate_value(value)\n for index, v in enumerate(self.__list):\n if v == value:\n return index",
"def check_for_header(filename):\n header = {}\n start_id = -1\n with open(filename, \"r\") as f:\n start = re.compile(r\"\\bSTART|start\\b\")\n # if the file has the keyword start, extract header\n if bool(start.search(f.read())):\n f.seek(0) # set the cursor back to the beginning\n lines = f.readlines()\n for i, line in enumerate(lines):\n if start.match(line):\n start_id = i # the line number where start is used (divides header and body)\n break\n args = line.split()\n args.insert(0, \"\") # check_for_commands only handles the second argument (first is usually res_id)\n header['DEFAULT'] = check_for_commands(args, 1, 2)\n\n return header, start_id",
"def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1"
]
| [
"0.64398503",
"0.60269237",
"0.5929694",
"0.5926794",
"0.5895366",
"0.58858496",
"0.58537346",
"0.5735795",
"0.57175994",
"0.57111824",
"0.5660905",
"0.56124383",
"0.55739796",
"0.55615",
"0.5545172",
"0.5544696",
"0.5517766",
"0.55092615",
"0.5485651",
"0.5432676",
"0.5423416",
"0.5400558",
"0.5396418",
"0.53949106",
"0.5352806",
"0.5341955",
"0.5332738",
"0.5321117",
"0.5275184",
"0.5273664"
]
| 0.80507034 | 0 |
Initialize object that handles counting of words | def __init__(self):
self.word_count_dict = {}
self.num_comments = 0
self.num_words = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\r\n #\r\n # Create dictionaries for each characteristic\r\n #\r\n self.words = {} # For counting words\r\n self.wordlengths = {} # For counting word lengths\r\n self.stems = {} # For counting stems\r\n self.sentencelengths = {} # For counting sentence lengths\r\n #\r\n # Create another of your own\r\n #\r\n self.gerund = {} # For counting words with ing \r\n self.text = ''",
"def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)",
"def count(self, word):\n pass",
"def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts",
"def __init__(self, words, corpus):\n self.words = words\n self.vocab_size = len(words)\n self.corpus = corpus\n counter = Counter(corpus)\n self.counts = np.array([counter[i] for i in range(self.vocab_size)])",
"def word_count(self):\n from collections import Counter\n counts = Counter(self._replace_non_alnum().split())\n return counts",
"def createWordCount(word, count):\n return WordCount(word, count)",
"def word_count(self):\n return Counter(self._normalize(self._raw_phrase_str))",
"def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))",
"def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)",
"def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}",
"def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.totalCount = 0\n self.zeroCount = 0\n self.train(corpus)",
"def __init__(self, wordlist=None, path=None):\n super().__init__() # Initialize this as a new dict\n if path:\n some_words = self.get_words(path)\n for word in some_words:\n if word:\n self[word] = self.get(word, 0) + 1\n if wordlist:\n for word in wordlist:\n self[word] = self.get(word, 0) + 1\n # after creating key-value pairs create instance variable that contains the sum of all values\n self.sum = sum([self.get(key, 0) for key in self]) # sum of weights\n # set the amount of words in the list to the instance variable token\n # Count of distinct word types in this histogram\n self.types = len(self)\n self.tokens = sum(self.values())",
"def __init__(self, text: str):\n self.words = WORDS_RE.findall(text)",
"def __init__(self, text):\n # BEGIN Question 2\n self.text = text\n self.word_set = []\n # END Question 2",
"def makeWords(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if x not in self.words: \r\n self.words[x] = 1\r\n else: \r\n self.words[x] += 1\r\n return self.words",
"def __init__(self, words):\n self.d = {}\n for i, w in enumerate(words):\n self.d[w] = self.d.get(w, []) + [i]",
"def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter",
"def __init__(self, corpus):\n self.unigram_count = Counter()\n self.bigram_count = defaultdict(Counter)\n self.vocabulary_size = 0\n self.num_words = 0\n self.backoff_multiplier = 0.4\n self.train(corpus)",
"def __init__(self, n, sents):\n assert n > 0\n self._n = n\n print(\"Counting...\")\n count = defaultdict(int)\n while n >= 0:\n for sent in sents:\n s = sent[:] # En una oracion auxiliar agrego el item de start y end para contarlos\n s.insert(0, \"<s>\")\n s.append(\"</s>\")\n for i in range(len(s) - n + 1):\n count[tuple(s[i:i + n])] += 1\n n -= 1\n count[()] = count[()] - count[('<s>',)] - count[\n ('</s>',)] # Pero no quiero que <s> y </s> sean considerados por ()\n self._count = count\n print(\"Computing vocabulary...\")\n self._voc = voc = set()\n for sent in sents:\n voc = voc.union(set(sent))\n voc.add('</s>')\n self._voc = voc\n self._V = len(voc) # vocabulary size\n print(\"Done\")",
"def __init__(self, words):\n self.words = words",
"def word_count(phrase):\n return collections.Counter(phrase.split())",
"def __init__(self, model_name):\n self.name = str(model_name)\n self.numwords = 0\n self.words = {} #how many types of words\n self.word_lengths = {} #how many word lengths\n self.stems = {} #how many stems\n self.sentence_lengths = {} #how many sentence lengths\n self.common_word = [] #top ten most common words",
"def __init__(self):\n self.words = None\n self.letters = None\n self.a = None\n self.nwords = None\n self.nletters = None",
"def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)",
"def __init__(self):\n self.counts = Counter()",
"def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.total = 0\n self.train(corpus)",
"def getCounter(self):\n word_count, noun_word_count = Counter(), Counter()\n word_rating, noun_word_rating = defaultdict(list), defaultdict(list)\n docs = self.nlp.pipe(\n self.docs, n_process=1, disable=self.disablelayers)\n \n\n for index, doc in enumerate(docs):\n for token in doc:\n if not token.is_stop and not token.is_punct and token.pos_ in self.pos:\n if token.pos_ == 'PROPN':\n word_count[token.lemma_] += 1\n word_rating[token.lemma_].append(self.ratings[index])\n else:\n noun_word_count[token.lemma_] += 1\n noun_word_rating[token.lemma_].append(self.ratings[index])\n\n # if 0<=proper nouns<=5 found, add regular nouns\n if not word_count or len(word_count) <= 5:\n word_count += noun_word_count\n word_rating = {**word_rating, **noun_word_rating}\n \n word_color = {word: self.getColor(\n ratings)[1] for word, ratings in word_rating.items()}\n word_sentiment = {word: self.getColor(\n ratings)[0] for word, ratings in word_rating.items()}\n\n return word_count, word_color, word_sentiment",
"def __init__(self, word_list=None):\n super(MarkovChain, self).__init__() # Initialize this as a new dict\n # Add properties to track useful word counts for this histogram\n self.types = 0 # Count of distinct word types in this histogram\n self.tokens = 0 # Total count of all word tokens in this histogram\n # Count words in given list, if any\n # Done: Initialize from parameter\n if word_list is not None:\n prev1 = word_list[0]\n prev2 = word_list[1]\n for curr in word_list[2:]:\n self.add_word((prev1, prev2), curr)\n prev1 = prev2\n prev2 = curr\n self.add_word((prev1, prev2))",
"def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None"
]
| [
"0.75186896",
"0.7517636",
"0.7487317",
"0.74007696",
"0.72729164",
"0.7101917",
"0.7101241",
"0.7074422",
"0.6983678",
"0.6976498",
"0.69556195",
"0.6916977",
"0.6903863",
"0.68937266",
"0.68930876",
"0.68808365",
"0.6868824",
"0.68647087",
"0.686444",
"0.68576026",
"0.68557787",
"0.6835525",
"0.6822123",
"0.6800465",
"0.67794067",
"0.6750214",
"0.67385334",
"0.672812",
"0.6713169",
"0.6713103"
]
| 0.80315834 | 0 |
Instantiate the class with data. this class gets called from a PptxTable. | def __init__(self, data):
self.data = data
self.columns = Columns(data)
self.rows = Rows(data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n _snap.TTableRow_swiginit(self, _snap.new_TTableRow())",
"def __init__(self, table):\n\n self.table = table\n\n ## Lots of shortcutting\n # \"Connection\"\n self.client = self.table.db.client\n\n # Table object\n self.worksheet = self.table.worksheet\n self.worksheet_id = self.table.worksheet_id\n\n # Addressing\n self.fields = self.table.fields\n\n ## And something to come later\n self.row = None\n self.data = {}",
"def __init__(self, flowchart=None, extension=None):\n logger.debug(\"Creating Table {}\".format(self))\n\n # Initialize our parent class\n super().__init__(\n flowchart=flowchart, title=\"Table\", extension=extension, logger=logger\n )\n\n # This needs to be after initializing subclasses...\n self.parameters = table_step.TableParameters()\n self.calls = 0",
"def __init__(self, *args):\n _snap.TTable_swiginit(self, _snap.new_TTable(*args))",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, **kwargs):\n TableLoader.__init__(self, **kwargs)",
"def __init__(self, *args):\n _table.Table_swiginit(self, _table.new_Table(*args))",
"def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data",
"def __init__(self, tableValues=None, json=None):\n if tableValues is not None:\n self.class_id = tableValues[0]\n self.crop_id = tableValues[1]\n self.target = tableValues[2]\n self.type = tableValues[3]\n self.latitude = tableValues[4]\n self.longitude = tableValues[5]\n self.orientation = tableValues[6]\n self.shape = tableValues[7]\n self.background_color = tableValues[8]\n self.alphanumeric = tableValues[9]\n self.alphanumeric_color = tableValues[10]\n self.description = tableValues[11]\n self.submitted = tableValues[12]\n elif json is not None:\n for prop in self.allProps():\n if prop in json:\n setattr(self, prop, json[prop])",
"def __init__(self, row):\n state = inspect(row)\n\n # Don't store the actual row, so we can serialize.\n self._model_cls = state.class_\n self._pk = state.identity\n\n self.data = Box(dict(row))",
"def __init__(\n self, table, ioloop, data_cleaner=None\n ):\n self.table = table # An already-created Perspective table\n self.ioloop = ioloop\n self._data_cleaner = data_cleaner",
"def __init__(\n self,\n table: \"Table\",\n use_header: bool = True,\n template: Optional[Template] = None,\n escape: bool = True,\n ):\n self.table = table\n self.escape = escape\n self.use_header = use_header\n self.template = self.get_template()\n self.context = self.get_context(table, self.use_header)",
"def __init__(self, object_list, table_name, crowdcontext):\n self.cc = crowdcontext\n self.data = {'id': range(len(object_list)), 'object':object_list}\n self.start_id = len(object_list)\n self.cols = [\"id\", \"object\"]\n self.table_name = table_name\n self.presenter = None\n self.project_id = None\n self.project_short_name = None\n self.project_name = None\n\n if type(object_list) is not list:\n raise Exception(\"'object_list' should be a list\")\n if table_name not in self.cc.show_tables():\n try:\n exe_str = \"CREATE TABLE '%s' (id integer, col_name BLOB, value BLOB DEFAULT NULL, PRIMARY KEY(id, col_name))\" %(table_name)\n self.cc.cursor.execute(exe_str)\n except sqlite3.OperationalError:\n raise",
"def __init__(self, data):\n self.data = data\n return",
"def __init__(self, plantsim, table_name):\n self._rows = []\n self._rows_coldict = []\n\n row_count = plantsim.get_value(f'{table_name}.YDim')\n col_count = plantsim.get_value(f'{table_name}.XDim')\n if row_count > 0 and col_count > 0:\n for row_idx in range(row_count + 1):\n row = []\n row_coldict = {}\n for col_idx in range(col_count + 1):\n cell_value = plantsim.get_value(f'{table_name}[{col_idx}, {row_idx}]')\n row.append(cell_value)\n if row_idx > 0:\n col_header = self.rows[0][col_idx]\n row_coldict[col_header] = cell_value\n self._rows.append(row)\n if row_idx > 0:\n self._rows_coldict.append(row_coldict)",
"def __init__(self):\n self.title = None\n self.table = pd.DataFrame()\n self.column_widths = None;",
"def __init__(self, datain, parent=None):\r\n QAbstractTableModel.__init__(self, parent)\r\n self.arraydata = datain",
"def __init__(self,data):\n\t\tself.data = tuple([tuple(x) if isiterable(x) else (x,) for x in data])\n\t\tself.rows = len(self.data)\n\t\tself.cols = len(self.data[0]) if len(self.data)>0 else 0",
"def __init__(self, *args, **kwargs):\n \n self.dense = True\n\n # Create table\n super().__init__(*args, **kwargs)",
"def __init__(self, tabletext=\"\"):\n\n # Table attributes\n self.__title = \"\"\n self.__type = \"GRAPHS\" # Default to GRAPHS\n self.__graphs = \"\"\n self.__columns = \"\"\n self.__text = \"\"\n self.__data = \"\"\n # Derived data\n self.__graph_list = []\n self.__column_list = []\n # Indicate the the object has been populated\n self.__table_parse_error = False\n self.__nonzero = False\n # The \"raw\" table data from the log file\n self.__rawtable = \"\"\n # Attempt to populate the table\n if tabletext:\n self.__rawtable = tabletext\n if not self.__buildtable(tabletext):\n # Failed to extract table\n # If it could be a title then use this\n # instead\n if str(tabletext).count(\"\\n\") == 0:\n self.settitle(tabletext)",
"def __init__(self, plot_factory, df, title=\"Graph\"):\n super().__init__(title=title)\n self.Table = Table(plot_factory, df, \"Show Table\")\n self.totalButtons = 10\n self.plot_factory = plot_factory\n self.df = df\n self.IdTitlePair = [\"id\", \"title\"]"
]
| [
"0.7175044",
"0.7061784",
"0.688019",
"0.687335",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.68206704",
"0.6778853",
"0.66937035",
"0.6593775",
"0.6545444",
"0.6529477",
"0.65272325",
"0.64250976",
"0.6422431",
"0.6415437",
"0.64110005",
"0.6401508",
"0.6388069",
"0.6350096",
"0.6316716",
"0.6296386"
]
| 0.7786698 | 0 |
Updates the column index to account for the headers and updates the data self.data. | def set_column_headers(self, headers):
if isinstance(self.columns.idx[0], int):
self.data = [sorted(headers)] + self.data
increment = [i + 1 for i in self.rows.idx]
self.rows.idx = [0] + increment
elif isinstance(self.columns.idx[0], str):
datum = {}
for i, key in enumerate(self.columns.idx):
datum.update({key: headers[i]})
self.data = [datum] + self.data
increment = [i + 1 for i in self.rows.idx]
self.rows.idx = [0] + increment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def UpdateColumns(self):\r\n data = self.data\r\n columns = data.getParam('columns',data.tankColumns[:])\r\n col_name = data.getParam('colNames',{})\r\n col_width = data.getParam('colWidths',{})\r\n col_align = data.getParam('colAligns',{})\r\n for index,column in enumerate(columns):\r\n name = col_name.get(column,_(column))\r\n width = col_width.get(column,30)\r\n align = wxListAligns[col_align.get(column,'LEFT')]\r\n self.gList.InsertColumn(index,name,align)\r\n self.gList.SetColumnWidth(index, width)",
"def _modify_columns(self, cols, X, y=None):",
"def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0",
"def updateRow(self, index: int) -> None:\n ...",
"def __update_feature_table_columns(self):\n self.__init_table()\n\n feature_dict_sorted_keys = feature_extractor_definition.keys()\n feature_dict_sorted_keys.sort()\n for key in feature_dict_sorted_keys:\n if not self.__has_feature_column(key):\n self.__add_feature_column(key, feature_extractor_definition[key])",
"def set_data_by_columns(self, row_headers, column_headers, data_columns):\n self.tblGeneric.setRowCount(len(data_columns[0]))\n self.tblGeneric.setColumnCount(len(data_columns))\n\n if row_headers:\n self.tblGeneric.setVerticalHeaderLabels(row_headers)\n else:\n self.tblGeneric.verticalHeader().setVisible(False)\n if column_headers:\n self.tblGeneric.setHorizontalHeaderLabels(column_headers)\n else:\n self.tblGeneric.horizontalHeader().setVisible(False)\n\n col = 0\n for data_column in data_columns:\n row = 0\n for data_value in data_column:\n item = QTableWidgetItem(str(data_value))\n item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\n self.tblGeneric.setItem(row, col, item)\n row += 1\n col += 1\n self.tblGeneric.resizeColumnsToContents()\n for row in range(self.tblGeneric.rowCount()):\n self.tblGeneric.setRowHeight(row, 10)",
"def _updateColAttrs(self, grid):\n col = 0\n\n for colname in self.table.columns:\n attr = wx.grid.GridCellAttr()\n renderer = MegaFontRenderer(self.table)\n attr.SetRenderer(renderer)\n grid.SetColAttr(col, attr)\n col += 1",
"def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header",
"def update_by_index(df, col, indexs, data):\n for indx in indexs:\n df.loc[indx, col] = data",
"def setAllColumns(self, newAllColumns):\n \n pass",
"def update_old_row(self, data):\n for key, value in data.items():\n _column = self._labels.index([v['display'] for k, v in self.headers.items() if k == key].pop())\n cell = self.item(self._opt_row, _column)\n _cell_data = cell.get_data()\n _cell_data[key] = value\n\n cell.set_content(value, _cell_data)",
"def add_blank_data_column(self):\n\n header_title, ok_pressed = QInputDialog.getText(self, \"Add Column\", \"Enter heading for the column:\",\n QLineEdit.Normal, \"\")\n if ok_pressed and header_title != '':\n # print(header_title)\n\n default_value, set_default_pressed = QInputDialog.getText(self, \"Set Default Value\",\n \"Enter default value to set for column if any:\",\n QLineEdit.Normal, \"\")\n\n row_count = self.csv_data_table.rowCount()\n last_column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertColumn(last_column_count)\n for empty_row in range(0, row_count):\n item = QTableWidgetItem(default_value)\n self.csv_data_table.setItem(empty_row, last_column_count, item)\n\n # TODO: fix untraced bug present in show/hide columns\n self.column_headers.append(header_title)\n self.column_headers_all.append(header_title)\n # print(self.column_headers)\n # print(self.column_headers_all)\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)",
"def update_column(self, xmldata):\n columns = managers.request_manager.get_request().session().value(\"columns\")\n headers = managers.request_manager.get_request().session().value(\"headers\")\n if not columns:\n return False\n if xmldata:\n # Parsing of column declaration\n dom = parseString(xmldata.encode(\"UTF-8\"))\n column = dom.getElementsByTagName(\"column\")[0]\n name = un_quote(column.getAttribute(\"name\"))\n if not name:\n return False\n declaration = name\n constraints = {}\n cid = column.getAttribute(\"id\")\n type = column.getAttribute(\"type\")\n if not type or type == \"INTEGER\" or type == \"REAL\" or type == \"TEXT\" or type == \"BLOB\":\n constraints[\"type\"] = type\n if column.getAttribute(\"notnull\") == \"true\":\n constraints[\"not null\"] = True\n if column.getAttribute(\"primary\") == \"true\":\n if column.getAttribute(\"autoincrement\") == \"true\":\n constraints[\"primary key\"] = \"autoincrement\"\n else:\n constraints[\"primary key\"] = True\n if column.getAttribute(\"unique\") == \"true\":\n constraints[\"unique\"] = True\n\n if column.getAttribute(\"default\") and column.getAttribute(\"default\") != \"\" and column.getAttribute(\"default\") != \"NULL\":\n constraints[\"default\"] = column.getAttribute(\"default\")\n\n column_obj = VDOM_db_column(name, constraints)\n column_obj.id = cid\n\n # praparing SQL code\n old_column = None\n for col in columns:\n if columns[col].id == cid:\n old_column = columns[col]\n break\n if not old_column:\n return False\n\n newtable = \"%s_new(\" % self.name\n oldtable = \"%s(\" % self.name\n for col in headers:\n if oldtable[-1] != \"(\":\n oldtable += \", \"\n oldtable += columns[col].to_declaration()\n\n if columns[col].id == cid:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += column_obj.to_declaration()\n\n else:\n if newtable[-1] != \"(\":\n newtable += \", \"\n newtable += columns[col].to_declaration()\n newtable += \")\"\n if newtable[-2] == \"(\":\n return False\n newcols = []\n newcols.extend(headers)\n newcols.remove(old_column.name)\n newcols_decl = \"\"\n for ctr in newcols:\n newcols_decl += \", `%s`\" % ctr\n\n sql = \"\"\"BEGIN TRANSACTION;\nCREATE TABLE %(newtable)s;\nINSERT INTO `%(newtablename)s` (%(newcols)s) SELECT %(newcols)s FROM `%(oldtablename)s`;\nDROP TABLE `%(oldtablename)s`;\nALTER TABLE `%(newtablename)s` RENAME TO `%(oldtablename)s`;\nEND TRANSACTION;\"\"\" % {\"newtable\": newtable, \"newtablename\": self.name + \"_new\", \"oldtablename\": self.name, \"newcols\": newcols_decl[2:]}\n query = VDOM_sql_query(self.owner_id, self.database_id, sql, None, True)\n query.commit()\n columns.pop(old_column.name)\n columns[column_obj.name] = column_obj\n managers.request_manager.get_request().session().value(\"columns\", columns)\n self.restore_structure()\n return True",
"def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)",
"def update_model_output(self):\n warnings.warn(\"Please ensure that the column names of the new file accurately corresponds to the relevant column names in the exisitng file\")\n column_names_new = self.new_data.head()\n column_names_old = self.existing_data.head()\n for column_name in column_names_new:\n if column_name in column_names_old:\n self.existing_data[column_name] = self.new_data[column_name]\n \n self.existing_data.to_csv(filename_main, index = False)",
"def _update_columns(self):\n self.columns, self.new_columns = self.new_columns, self.columns\n self.num_columns = self.num_new_columns\n self.num_new_columns = 0\n\n # Now update new_columns and mapping with the information for the commit\n # after this one.\n #\n # First, make sure we have enough room. At most, there will be\n # self.num_columns + self.num_parents columns for the next commit.\n max_new_columns = self.num_columns + self.num_parents\n\n # Clear out self.mapping\n self.mapping_size = 2 * max_new_columns\n for i in range(self.mapping_size):\n self.mapping[i] = -1\n\n # Populate self.new_columns and self.mapping\n #\n # Some of the parents of this commit may already be in self.columns. If\n # so, self.new_columns should only contain a single entry for each such\n # commit. self.mapping should contain information about where each\n # current branch line is supposed to end up after the collapsing is\n # performed.\n seen_this = False\n mapping_idx = 0\n is_commit_in_columns = True\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n is_commit_in_columns = False\n col_commit = self.commit\n else:\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n old_mapping_idx = mapping_idx\n seen_this = True\n self.commit_index = i\n for parent in self._interesting_parents():\n # If this is a merge, or the start of a new childless\n # column, increment the current color.\n if self.num_parents > 1 or not is_commit_in_columns:\n self._increment_column_color()\n mapping_idx = self._insert_into_new_columns(\n parent,\n mapping_idx)\n # We always need to increment mapping_idx by at least 2, even if\n # it has no interesting parents. The current commit always takes\n # up at least 2 spaces.\n if mapping_idx == old_mapping_idx:\n mapping_idx += 2\n else:\n mapping_idx = self._insert_into_new_columns(col_commit,\n mapping_idx)\n\n # Shrink mapping_size to be the minimum necessary\n while (self.mapping_size > 1 and\n self.mapping[self.mapping_size - 1] < 0):\n self.mapping_size -= 1\n\n # Compute self.width for this commit\n self._update_width(is_commit_in_columns)",
"def SetColumn(self, column, colInfo):\r\n\r\n self._header_win.SetColumn(column, colInfo)\r\n self._header_win.Refresh()",
"def set_column_footers(self, footers):\n if isinstance(self.columns.idx[0], int):\n self.data += [sorted(footers)]\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: footers[i]})\n self.data += [datum]\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment",
"def update_column_levels(self):\n extra_levels = self.dataframe.columns.nlevels - self.nlevels\n if extra_levels > 0:\n self.nlevels = self.dataframe.columns.nlevels\n column_indices = {}\n column_names = {}\n for column_name in self.column_indices:\n if not isinstance(column_name, tuple):\n # It's one of our special columns\n new_column_name = column_name\n else:\n new_column_name = column_name + ('',) * extra_levels\n column_index = self.column_indices[column_name]\n column_indices[new_column_name] = column_index\n column_names[column_index] = new_column_name\n self.column_indices = column_indices\n self.column_names = column_names",
"def updateHeaderComputedValues( self ):\n self.nAvgBytesPerSec = int( self.nNbrChannel*self.nSamplingRate*self.nNbrBitsPerSample/8 )\n self.nSizeBlockAlign = int( self.nNbrChannel*self.nNbrBitsPerSample/8 )\n self.dataType = Wav.getDataType( self.nNbrBitsPerSample )",
"def reset_index(self):\n self.df = self.df.reset_index()",
"def set_index(self, df):\n\n # generate a map of continuous index values to items\n self.index2item = dict(enumerate(df[self.col_item].unique()))\n\n # invert the mapping from above\n self.item2index = {v: k for k, v in self.index2item.items()}\n\n # create mapping of users to continuous indices\n self.user2index = {x[1]: x[0] for x in enumerate(df[self.col_user].unique())}\n\n # set values for the total count of users and items\n self.n_users = len(self.user2index)\n self.n_items = len(self.index2item)",
"def fast_update_col(self,j,vals):\n dataptr = self.col_view[:,j].data\n self.X.data[dataptr] = vals",
"def setdata(self, data):\n\n # Store the data from the table (\"raw\" format)\n # This is a list of whitespace separated data items\n self.__data = data\n # Attempt to populate the column objects\n if self.ncolumns() > 0:\n self.__populate_columns()\n self.__nonzero = True",
"def updateData(self, data, filename):\r\n self.data = data\r\n #self.filename = filename + \".xlsx\"\r\n self.index = \"index.txt\"\r\n self.colum = ('A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R'\r\n ,'S','T','U','V')\r\n self.cf = XLC.checkfile(self, self.index )\r\n #print(self.cf)\r\n if self.cf == True:\r\n self.resultFile = open(self.index, 'r')\r\n self.row = int(self.resultFile.read())\r\n #print(self.row)\r\n self.resultFile.close()\r\n if self.cf == False:\r\n print(\"[WARNING] index.txt file is missing. Neglect this message if this program is running 1st time on your system\")\r\n self.row = 2\r\n \r\n # Looding the document\r\n self.filename = filename + \".xlsx\"\r\n wb = xl.load_workbook(self.filename)\r\n sheet = wb.sheetnames\r\n sheet = wb.active\r\n\r\n if sheet['A2'].value == None:\r\n self.row = 2\r\n\r\n self.data = self.data.split(\",\")\r\n for i in range(0, len(self.data)):\r\n sheet[self.colum[i]+str(self.row)] = self.data[i]\r\n #print(self.colum[i]+str(self.row))\r\n\r\n self.row = self.row + 1\r\n self.resultFile = open('index.txt', 'w')\r\n self.resultFile.write(str(self.row))\r\n self.resultFile.close()\r\n wb.save(self.filename)\r\n return",
"def _reload(self):\n if os.path.exists(self.filename):\n self.data = pd.read_csv(self.filename)\n else:\n self.data = pd.DataFrame(columns=self.unique_keys)\n\n # Set these default values\n # if 'weight_rescale' not in self.data.columns:\n # self.data['weight_rescale'] = 'none'\n # if 'norm' not in self.data.columns:\n # self.data['norm'] = 'softmax'\n # if 'update' not in self.data.columns:\n # self.data['update'] = 'all'\n # if 'replay' not in self.data.columns:\n # self.data['replay'] = False\n if 'debug' not in self.data.columns:\n self.data['debug'] = False\n\n # if 'tie' not in self.data.columns:\n # self.data['tie'] = False\n\n if 'update_length' not in self.data.columns:\n self.data['update_length'] = 0\n # for key in self.unique_keys:\n # self.data[key] = np.nan\n # Remaining set to None\n # for k in self.check_keys:\n # if k not in self.data.columns:\n # self.data[k] = None",
"def update_from_indexes(self, data, **kw):\n for i in data:\n self.update_from_index(i, **kw)",
"def update_all_data(self):\n self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())",
"def update(self): \n \n timeout=0\n \n while(timeout<10):\n try:\n data = pnd.read_csv(self.path,index_col=0,sep=',',names=['Values'])\n self.data['Values']=data['Values'].values\n self.data.Values.values[-1] = data.index.values[-1]\n \n# print('Updated from file:',self.data.iloc[-1].values[0])\n break\n except ValueError:\n timeout+=1\n if timeout == 10:\n print('failed to update')",
"def sort_columns(self):\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)"
]
| [
"0.66377145",
"0.61051923",
"0.60043097",
"0.59972775",
"0.5958276",
"0.5950638",
"0.5932546",
"0.5932343",
"0.5900175",
"0.5803581",
"0.57608676",
"0.573972",
"0.57000065",
"0.56839633",
"0.5668996",
"0.56637555",
"0.5663126",
"0.5660447",
"0.5659867",
"0.56540513",
"0.56156576",
"0.560133",
"0.55797124",
"0.55774176",
"0.55766237",
"0.5576286",
"0.5566033",
"0.5523857",
"0.5507167",
"0.5497163"
]
| 0.69262457 | 0 |
Returns the origin's type specs. A TFXIO 'Y' may be a result of projection of another TFXIO 'X', in which case then 'X' is the origin of 'Y'. And this method returns what X.TensorAdapter().TypeSpecs() would return. May equal to `self.TypeSpecs()`. | def OriginalTypeSpecs(self) -> Dict[str, tf.TypeSpec]:
return self._original_type_specs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._type_specs",
"def get_spec_type(self):\r\n return self._spec_type",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def numpy_types(self) -> List[np.dtype]:\n if self.is_tensor_spec():\n return [x.type for x in self.inputs]\n return [x.type.to_numpy() for x in self.inputs]",
"def types(self) -> list:\n if self._types is None:\n fdist = self.fdist # ranked order\n types_ = list(fdist.type.values)\n self._types = types_\n return self._types",
"def pandas_types(self) -> List[np.dtype]:\n if self.is_tensor_spec():\n raise MlflowException(\"TensorSpec only supports numpy types, use numpy_types() instead\")\n return [x.type.to_pandas() for x in self.inputs]",
"def input_type_shapes(self):\n return self._input_type_shapes",
"def type_shapes(self):\n return self._type_shapes",
"def type_spec(self) -> tf.TypeSpec:\n raise NotImplementedError",
"def input_types(self) -> List[Union[DataType, np.dtype]]:\n return [x.type for x in self.inputs]",
"def xtype(self):\n if not hasattr(self, '_xtype'):\n if 'Magnetic' in self.__class__.__name__:\n self._xtype = 'magnetic'\n else: # Default\n self._xtype = 'electric'\n return self._xtype",
"def dtype_specs(self):\r\n # TODO: add more type correspondances for e.g. int32, int64, float32,\r\n # complex64, etc.\r\n try:\r\n return {\r\n 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\r\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\r\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\r\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\r\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\r\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\r\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\r\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\r\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\r\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\r\n 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),\r\n 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')\r\n }[self.dtype]\r\n except KeyError:\r\n raise TypeError(\"Unsupported dtype for %s: %s\"\r\n % (self.__class__.__name__, self.dtype))",
"def dtype_specs(self):\r\n #TODO: add more type correspondances for e.g. int32, int64, float32,\r\n #complex64, etc.\r\n try:\r\n return {'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\r\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\r\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\r\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\r\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\r\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\r\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\r\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\r\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\r\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\r\n 'complex128': (complex, 'theano_complex128',\r\n 'NPY_COMPLEX128'),\r\n 'complex64': (complex, 'theano_complex64',\r\n 'NPY_COMPLEX64')}[self.dtype]\r\n except KeyError:\r\n raise TypeError(\"Unsupported dtype for %s: %s\" % (\r\n self.__class__.__name__, self.dtype))",
"def get_types(self):\n return self.types",
"def dtypes(self):\n return self.to_pandas().dtypes",
"def output_type_shapes(self):\n return self._output_type_shapes",
"def info_types(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetInfoType']:\n return pulumi.get(self, \"info_types\")",
"def input_tensorspec(self):\n return self._tensorspec",
"def data_types(self) -> 'outputs.AwsS3DataConnectorDataTypesResponse':\n return pulumi.get(self, \"data_types\")",
"def kind(self):\n # type () -> str\n return np.dtype(self.type).kind",
"def get_types(self):\n return self.column_type",
"def column_types(self):\n if self.__type__ == VERTEX_GFRAME:\n return self.__graph__.__proxy__.get_vertex_field_types()\n elif self.__type__ == EDGE_GFRAME:\n return self.__graph__.__proxy__.get_edge_field_types()",
"def get_data_types():\n return tf.float32, tf.float32, tf.int32",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def get_spec(self):\n from schematics.types import ModelType\n spec = {\n 'id': self.name,\n 'description': self.description,\n 'addressable': self.array,\n 'required': self.required,\n }\n if self.type.has_schema:\n spec['schema'] = self.type.get_spec()\n else:\n spec.update(self.type.get_spec())\n\n return spec",
"def type(self):\n # type: () -> type\n return _python_type_map[str(self.xnd_dtype.hidden_dtype)]",
"def ftype(obj):\n if isinstance(obj, np.ndarray) or isinstance(obj, torch.Tensor):\n types = \" \".join([str(x) for x in [type(obj), obj.dtype, list(obj.shape)]])\n if isinstance(obj, torch.Tensor):\n types = \" \".join([obj.device.type, types])\n else:\n types = str(type(obj))\n\n for s in [\"class \", \"'\", \"<\", \">\", \"(\", \")\", \"torch.\", \"numpy.\" ]:\n types = types.replace(s, \"\")\n return types",
"def get_predictors_types(self):\n\t\treturn self.predictors_types",
"def types(self) -> List[str]:\n return self._types",
"def type(self) -> np.dtype:\n return self._tensorInfo.dtype"
]
| [
"0.67809033",
"0.6172155",
"0.6097099",
"0.5830014",
"0.5826194",
"0.58002317",
"0.5799282",
"0.5738908",
"0.5685234",
"0.5614321",
"0.558659",
"0.5584207",
"0.5573686",
"0.5501269",
"0.54489535",
"0.5427112",
"0.53903335",
"0.5382472",
"0.5372961",
"0.53345495",
"0.5292687",
"0.52871114",
"0.5275485",
"0.526656",
"0.5265811",
"0.5264906",
"0.5260035",
"0.52570724",
"0.5248782",
"0.52205116"
]
| 0.65551096 | 1 |
Returns the TypeSpec for each tensor. | def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:
return self._type_specs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def type_spec(self) -> tf.TypeSpec:\n raise NotImplementedError",
"def numpy_types(self) -> List[np.dtype]:\n if self.is_tensor_spec():\n return [x.type for x in self.inputs]\n return [x.type.to_numpy() for x in self.inputs]",
"def get_all_tensor_dtypes() -> KeysView[int]:\n return mapping.TENSOR_TYPE_MAP.keys()",
"def OriginalTypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._original_type_specs",
"def get_data_types():\n return tf.float32, tf.float32, tf.int32",
"def pandas_types(self) -> List[np.dtype]:\n if self.is_tensor_spec():\n raise MlflowException(\"TensorSpec only supports numpy types, use numpy_types() instead\")\n return [x.type.to_pandas() for x in self.inputs]",
"def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:",
"def _specs_for_flat_tensors(element_spec):\n if isinstance(element_spec, StructuredTensor.Spec):\n specs = []\n for _, field_spec in sorted(\n element_spec._field_specs.items(), key=lambda t: t[0]): # pylint: disable=protected-access\n specs.extend(_specs_for_flat_tensors(field_spec))\n elif isinstance(element_spec, type_spec.BatchableTypeSpec) and (\n element_spec.__class__._flat_tensor_specs is # pylint: disable=protected-access\n type_spec.BatchableTypeSpec._flat_tensor_specs): # pylint: disable=protected-access\n # Classes which use the default `_flat_tensor_specs` from\n # `BatchableTypeSpec` case (i.e. a derived class does not override\n # `_flat_tensor_specs`.) are encoded using `component_specs`.\n specs = nest.flatten(\n element_spec._component_specs, # pylint: disable=protected-access\n expand_composites=False)\n else:\n # In addition flatting any nesting in Python,\n # this default case covers things that are encoded by one tensor,\n # such as dense tensors which are unchanged by encoding and\n # ragged tensors and sparse tensors which are encoded by a variant tensor.\n specs = nest.flatten(element_spec, expand_composites=False)\n return specs",
"def dtype_specs(self):\r\n # TODO: add more type correspondances for e.g. int32, int64, float32,\r\n # complex64, etc.\r\n try:\r\n return {\r\n 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\r\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\r\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\r\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\r\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\r\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\r\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\r\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\r\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\r\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\r\n 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),\r\n 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')\r\n }[self.dtype]\r\n except KeyError:\r\n raise TypeError(\"Unsupported dtype for %s: %s\"\r\n % (self.__class__.__name__, self.dtype))",
"def dtype_specs(self):\r\n #TODO: add more type correspondances for e.g. int32, int64, float32,\r\n #complex64, etc.\r\n try:\r\n return {'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\r\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\r\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\r\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\r\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\r\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\r\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\r\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\r\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\r\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\r\n 'complex128': (complex, 'theano_complex128',\r\n 'NPY_COMPLEX128'),\r\n 'complex64': (complex, 'theano_complex64',\r\n 'NPY_COMPLEX64')}[self.dtype]\r\n except KeyError:\r\n raise TypeError(\"Unsupported dtype for %s: %s\" % (\r\n self.__class__.__name__, self.dtype))",
"def feature_spec(self):\n if not self.infer_without_label:\n feature_shapes = [(tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]),\n tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]))]\n feature_shapes.append(tf.TensorShape([tf.Dimension(None)]))\n else:\n feature_shapes = [(tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]))\n ]\n if len(feature_shapes) == 1:\n return feature_shapes[0]\n return tuple(feature_shapes)",
"def type_shapes(self):\n return self._type_shapes",
"def input_tensorspec(self):\n return self._tensorspec",
"def input_type_shapes(self):\n return self._input_type_shapes",
"def sample_ids_dtype(self):\n # Copied from the seq2seq.TrainingHelper class.\n return tf.int32",
"def sample_ids_dtype(self):\n # Copied from the abstract seq2seq.CustomHelper class.\n return tf.int32",
"def list_to_backend_type(data: List) -> TTensor:",
"def output_type_shapes(self):\n return self._output_type_shapes",
"def generate_object_specs(self):\n return [[] for _ in xrange(self.batch_size)]",
"def get_input_tensor_types(\n model_buffer: bytearray) -> List[_schema_fb.TensorType]:\n subgraph = _get_subgraph(model_buffer)\n tensor_types = []\n for i in range(subgraph.InputsLength()):\n index = subgraph.Inputs(i)\n tensor_types.append(subgraph.Tensors(index).Type())\n return tensor_types",
"def get_output_tensor_types(\n model_buffer: bytearray) -> List[_schema_fb.TensorType]:\n subgraph = _get_subgraph(model_buffer)\n tensor_types = []\n for i in range(subgraph.OutputsLength()):\n index = subgraph.Outputs(i)\n tensor_types.append(subgraph.Tensors(index).Type())\n return tensor_types",
"def input_types(self) -> List[Union[DataType, np.dtype]]:\n return [x.type for x in self.inputs]",
"def type(self) -> np.dtype:\n return self._tensorInfo.dtype",
"def input_types(self):\n return OrderedDict(\n {\n \"audio_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n )",
"def input_types(self):\n return OrderedDict(\n {\n \"audio_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n )",
"def input_types(self):\n return OrderedDict(\n {\n \"audio_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n )",
"def get_spec_type(self):\r\n return self._spec_type",
"def _determine_dtypes_and_shapes(self):\r\n while True:\r\n raw_entry = next(self.entry_generator(yield_just_one=True))\r\n if raw_entry is None:\r\n continue\r\n preprocessed_entry_dict = self.preprocess_entry(raw_entry)\r\n if preprocessed_entry_dict is not None:\r\n break\r\n labels, values = zip(*list(preprocessed_entry_dict.items()))\r\n dtypes = [value.dtype for value in values]\r\n shapes = [value.shape for value in values]\r\n return labels, dtypes, shapes",
"def get_spec(self):\n from schematics.types import ModelType\n spec = {\n 'id': self.name,\n 'description': self.description,\n 'addressable': self.array,\n 'required': self.required,\n }\n if self.type.has_schema:\n spec['schema'] = self.type.get_spec()\n else:\n spec.update(self.type.get_spec())\n\n return spec",
"def fulltypes_for_flat_tensors(element_spec):\n specs = _specs_for_flat_tensors(element_spec)\n full_types_lists = [_translate_to_fulltype_for_flat_tensors(s) for s in specs]\n rval = nest.flatten(full_types_lists) # flattens list-of-list to flat list.\n return rval"
]
| [
"0.66568756",
"0.6340732",
"0.6339329",
"0.62210155",
"0.61656004",
"0.60684854",
"0.5887791",
"0.58818924",
"0.571628",
"0.5686403",
"0.56000185",
"0.5598356",
"0.5592052",
"0.55783784",
"0.55601513",
"0.5499973",
"0.5484487",
"0.5443799",
"0.5402399",
"0.53958815",
"0.538459",
"0.5378274",
"0.5356046",
"0.53362775",
"0.53362775",
"0.53362775",
"0.5331947",
"0.5301294",
"0.5275292",
"0.52575076"
]
| 0.74948585 | 0 |
Returns a batch of tensors translated from `record_batch`. | def ToBatchTensors(
self,
record_batch: pa.RecordBatch,
produce_eager_tensors: Optional[bool] = None) -> Dict[str, Any]:
tf_executing_eagerly = tf.executing_eagerly()
if produce_eager_tensors and not tf_executing_eagerly:
raise RuntimeError(
"Eager Tensors were requested but eager mode was not enabled.")
if produce_eager_tensors is None:
produce_eager_tensors = tf_executing_eagerly
if not record_batch.schema.equals(self._arrow_schema):
raise ValueError("Expected same schema.")
result = {}
for tensor_name, handler in self._type_handlers:
try:
result[tensor_name] = handler.GetTensor(record_batch,
produce_eager_tensors)
except Exception as e:
raise ValueError(
"Error raised when handling tensor '{}'".format(tensor_name)) from e
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetTensor(self, record_batch: pa.RecordBatch,\n produce_eager_tensors: bool) -> Any:",
"def transform_batch(self, the_batch):\n return (\n arg.to(self.device) if isinstance(arg, torch.Tensor) else arg\n for arg in the_batch\n )",
"def generate_batch(\n batch, vocab: Dict[str, int]\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:\n input_unigrams = [DatasetLSTM.encode_sequence(b[0][0], vocab) for b in batch]\n input_bigrams = [DatasetLSTM.encode_sequence(b[0][1], vocab) for b in batch]\n input_unigrams = torch.tensor(input_unigrams)\n input_bigrams = torch.tensor(input_bigrams)\n labels = torch.tensor([b[1] for b in batch])\n return (input_unigrams, input_bigrams), labels",
"def __call__(self, batch: iter) -> torch.Tensor or list(torch.Tensor):\n if torch.is_tensor(batch[0]):\n return torch.cat(tuple(t.unsqueeze(0) for t in batch), 0).view(-1, self.batch_size, *batch[0].size())\n elif isinstance(batch[0], int):\n return torch.LongTensor(batch).view(-1, self.batch_size)\n elif isinstance(batch[0], collections.Iterable):\n # if each batch element is not a tensor, then it should be a tuple\n # of tensors; in that case we collate each element in the tuple\n transposed = zip(*batch)\n return tuple(self.__call__(samples) for samples in transposed)\n\n raise TypeError((\"batch must contain tensors, numbers, or lists; found {}\"\n .format(type(batch[0]))))",
"def generate_batch(\n batch: Tuple[Dict[str, Sequence[int]], List[Sequence[int]]]\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n input_ids = torch.tensor([b[0][\"input_ids\"] for b in batch])\n attention_mask = torch.tensor([b[0][\"attention_mask\"] for b in batch])\n token_type_ids = torch.tensor([b[0][\"token_type_ids\"] for b in batch])\n labels = torch.tensor([b[1] for b in batch])\n features = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n return features, labels",
"def batchify(batch):\n\n PAD_ID = batch[0]['<PAD>']\n inputs_list = [ex['input'] for ex in batch]\n max_length_list = []\n for docs in inputs_list:\n max_length = max([len(doc[1]) for doc in docs])\n max_length_list.append(max_length)\n inputs = []\n for index,docs in enumerate(inputs_list):\n bat_size = len(docs)\n tp_vecs = torch.zeros((bat_size,max_length_list[index]),dtype=torch.long)\n tp_vecs += PAD_ID\n for k,doc in enumerate(docs):\n for j,word in enumerate(doc[1]):\n tp_vecs[k,j] = word\n tp_list = [doc[0] for doc in docs]\n tp_list = torch.tensor(tp_list,dtype=torch.long)\n inputs.append([tp_list,tp_vecs])\n week_index_list = torch.tensor([ex['target'][0] for ex in batch],dtype=torch.long)\n word_index_list = torch.tensor([ex['target'][1] for ex in batch],dtype=torch.long)\n targets = (week_index_list,word_index_list)\n return inputs,targets",
"def to_torch(batch, **kwargs):\n x = torch.from_numpy(np.array(batch, dtype='float32'))\n return x.view(*x.size()[:2], -1).permute(2, 0, 1)",
"def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])",
"def batch_to_device(batch):\n for key in batch:\n if isinstance(batch[key], torch.Tensor):\n batch[key] = batch[key].to(device)\n return batch",
"def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else batches[0]",
"def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch",
"def _unroll_sequence_batch(self, batch):\n shape = batch.shape\n if len(shape) == 3: # Model output\n return batch.view(shape[0]*shape[1], shape[2])\n elif len(shape) == 2: # Target labels\n return batch.view(shape[0]*shape[1])",
"def prepare_batch(batch, device=None, non_blocking=False):\n\timages, target = batch\n\treturn [convert_tensor(image, device=device, non_blocking=non_blocking) for image in images], \\\n\t convert_tensor(target, device=device, non_blocking=non_blocking)",
"def imed_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if torch.is_tensor(batch[0]):\n stacked = torch.stack(batch, 0)\n return stacked\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return __numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: imed_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n return [imed_collate(samples) for samples in batch]\n\n return batch",
"def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])",
"def rebatch(pad_idx, batch):\n return Batch(batch.src, batch.trg, pad_idx)",
"def from_numpy_to_tensor(record, device_id):\n [X, A,\n mol_ids_rep, rep_ids_rep, iw_ids,\n last_append_mask,\n NX, NX_rep,\n action_0, actions,\n log_p] = record\n\n X = nd.array(X, ctx=mx.gpu(device_id), dtype='int32')\n A_sparse = []\n for A_i in A:\n if A_i.shape[0] == 0:\n A_sparse.append(None)\n else:\n # transpose may not be supported in gpu\n A_i = np.concatenate([A_i, A_i[:, [1, 0]]], axis=0)\n\n # construct csr matrix ...\n data = np.ones((A_i.shape[0], ), dtype=np.float32)\n row, col = A_i[:, 0], A_i[:, 1]\n A_sparse_i = nd.sparse.csr_matrix((data, (row, col)),\n shape=tuple([int(X.shape[0]), ]*2),\n ctx=mx.gpu(device_id),\n dtype='float32')\n\n # append to list\n A_sparse.append(A_sparse_i)\n\n batch_size, iw_size = np.asscalar(mol_ids_rep.max() + 1), \\\n np.asscalar(rep_ids_rep.max() + 1)\n\n mol_ids_rep, rep_ids_rep, iw_ids, \\\n last_append_mask, \\\n NX, NX_rep, action_0, actions = [nd.array(_x, ctx=mx.gpu(device_id), dtype='int32')\n for _x in [mol_ids_rep, rep_ids_rep, iw_ids,\n last_append_mask,\n NX, NX_rep, action_0, actions]]\n\n log_p = nd.array(log_p, ctx=mx.gpu(device_id), dtype='float32')\n\n record = [X, A_sparse, iw_ids, last_append_mask,\n NX, NX_rep, action_0, actions, log_p,\n batch_size, iw_size]\n\n\n return record",
"def conv_batchify(self, batch):\n batch_roles = []\n batch_context_tokens = []\n batch_response = []\n\n for conv_dict in batch:\n batch_roles.append(0 if conv_dict['role'] == 'Seeker' else 1)\n context_tokens = [utter + [self.conv_bos_id] for utter in conv_dict['context_tokens']]\n context_tokens[-1] = context_tokens[-1][:-1]\n batch_context_tokens.append(\n truncate(merge_utt(context_tokens), max_length=self.context_truncate, truncate_tail=False),\n )\n batch_response.append(\n add_start_end_token_idx(\n truncate(conv_dict['response'], max_length=self.response_truncate - 2),\n start_token_idx=self.start_token_idx,\n end_token_idx=self.end_token_idx\n )\n )\n\n batch_context_tokens = padded_tensor(items=batch_context_tokens,\n pad_idx=self.pad_token_idx,\n max_len=self.context_truncate,\n pad_tail=False)\n batch_response = padded_tensor(batch_response,\n pad_idx=self.pad_token_idx,\n max_len=self.response_truncate,\n pad_tail=True)\n batch_input_ids = torch.cat((batch_context_tokens, batch_response), dim=1)\n batch_roles = torch.tensor(batch_roles)\n\n return (batch_roles,\n batch_input_ids,\n batch_context_tokens,\n batch_response)",
"def embed_batch(self, batch: batches.TFBatch) -> tf.Tensor:\n data = batches.batch_to_components(batch, self._config.context_features,\n self._config.sequential_features)\n return self.embed_data(data)",
"def _batch_to_device(batch, target_device):\n tensor = _getattr(\"torch\", \"Tensor\")\n for key in batch:\n if isinstance(batch[key], tensor):\n batch[key] = batch[key].to(target_device)\n return batch",
"def collate_without_batching_dict(batch):\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.Mapping):\n return [d for d in batch]\n # return {key: collate_without_batching_dict_list([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [collate_without_batching_dict(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))",
"def get_batch(dataset, batch_size=32):\n data_count = len(dataset)\n x_rand = np.random.randint(0, data_count, size=batch_size)\n y_list = []\n for i in range(0, batch_size):\n y_list.append(dataset[x_rand[i]]['TCLOSE'])\n batch_x = torch.tensor(np.array([x_rand]).T).float()\n batch_y = torch.tensor(np.array([y_list]).T).float()\n return batch_x, batch_y",
"def _query_it(records_query: List[str],\n batch_size: int,\n metadata: Training\n ) -> Iterator[Dict[str, np.ndarray]]:\n dataset = predict_data(records_query, metadata, batch_size)()\n X_tensor = dataset.make_one_shot_iterator().get_next()\n with tf.Session() as sess:\n while True:\n try:\n X = sess.run(X_tensor)\n if \"con\" in X:\n X[\"con\"] = _make_mask(X[\"con\"], X[\"con_mask\"])\n if \"cat\" in X:\n X[\"cat\"] = _make_mask(X[\"cat\"], X[\"cat_mask\"])\n yield X\n except tf.errors.OutOfRangeError:\n break\n return",
"def process_batch_input_for_RNN(batch_input):\n batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])\n X = tf.transpose(batch_input_)\n\n return X",
"def process_batch_input_for_RNN(self, batch_input):\n batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])\n x = tf.transpose(batch_input_)\n return x",
"def consume_tfrecord(is_training=True, batch_size=32):\n if is_training:\n dataset = tf.data.TFRecordDataset(tfrecord_file_training)\n else:\n dataset = tf.data.TFRecordDataset(tfrecord_file_eval)\n\n dataset = dataset.map(tfrecord_utils.parse)\n\n if is_training:\n dataset = dataset.map(distorted_input)\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=2560)\n else:\n dataset = dataset.map(norm_input)\n\n dataset = dataset.padded_batch(batch_size, padded_shapes=([FLAGS.image_size, FLAGS.image_size, 3], []))\n\n iterator = dataset.make_one_shot_iterator()\n\n return iterator",
"def _batch2torch(self, batch, batch_sz=None):\n\n batch = Transition(*zip(*batch))\n state_batch = torch.cat(batch.state, 0).float()\n\n # ONLY NON Terminal next states\n next_state_batch = [x for x in batch.state_ if x is not None]\n next_state_batch = torch.cat(next_state_batch, 0).float()\n\n action_batch = torch.cat(batch.action, 0)\n reward_batch = torch.cat(batch.reward, 0).float()\n\n mask = torch.cat(batch.done, 0)\n mask = (1 - mask).nonzero().view(-1)\n\n batch_sz = action_batch.size(0)\n\n return [batch_sz, state_batch, action_batch, reward_batch,\n next_state_batch, mask]",
"def _get_batch_fn(dataset):\n def get_batch(idx):\n x_bat = dataset['input'][idx]\n y_bat = dataset['label'][idx]\n x_bat, y_bat = preprocess(x_bat, y_bat)\n\n return x_bat, y_bat\n\n return get_batch",
"def process_state_batch(self, batch):\n return np.squeeze(batch, axis=1)",
"def default_prediction_collate(batch):\n error_msg = \"batch must contain tensors or slice; found {}\"\n if isinstance(batch[0], torch.Tensor):\n return torch.stack(batch, 0)\n elif isinstance(batch[0], tuple) and isinstance(batch[0][0], slice):\n return batch\n elif isinstance(batch[0], collections.abc.Sequence):\n transposed = zip(*batch)\n return [default_prediction_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))"
]
| [
"0.68435967",
"0.67113644",
"0.64927554",
"0.6489503",
"0.6200377",
"0.6178752",
"0.6141338",
"0.60852915",
"0.60623634",
"0.6045422",
"0.59889543",
"0.5961333",
"0.59462744",
"0.592611",
"0.5886543",
"0.5885961",
"0.5879243",
"0.5869325",
"0.5849582",
"0.5841679",
"0.5822695",
"0.57868296",
"0.575363",
"0.56812805",
"0.56792295",
"0.5677234",
"0.56673336",
"0.56621313",
"0.56545454",
"0.5637255"
]
| 0.7057336 | 0 |
Initializer. It can be assumed that CanHandle(arrow_schema, tensor_representation) would return true. | def __init__(self, arrow_schema: pa.Schema,
tensor_representation: schema_pb2.TensorRepresentation): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:",
"def __init__(self, schema: dict, **kwargs) -> None:\r\n self.case_check = case_check(settings.CASE)\r\n self.ignored_keys = set_ignored_keys(**kwargs)\r\n if read_type(schema) == 'object':\r\n logger.debug('root -> dict')\r\n self.test_dict(schema)\r\n elif read_type(schema) == 'array':\r\n logger.debug('root -> list')\r\n self.test_list(schema)\r\n else:\r\n logger.debug('Skipping case check')",
"def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n ragged_tensor = tensor_representation.ragged_tensor\n if len(ragged_tensor.feature_path.step) < 1:\n return False\n\n value_path = path.ColumnPath.from_proto(ragged_tensor.feature_path)\n\n # Checking the outer dimensions represented by the value feature path.\n contains_list = False\n try:\n arrow_type = None\n for arrow_type in _EnumerateTypesAlongPath(arrow_schema, value_path):\n if _IsListLike(arrow_type):\n contains_list = True\n if pa.types.is_struct(arrow_type):\n # The path is depleted, but the last arrow_type is a struct. This means\n # the path is a Non-leaf field.\n return False\n except ValueError:\n # ValueError signifies wrong column name / field name requested.\n return False\n if not contains_list:\n return False\n\n # Check the auxiliar features that need to be accessed to form the inner\n # dimensions partitions.\n parent_path = value_path.parent()\n\n # Check the columns exists and have correct depth and type.\n for partition in ragged_tensor.partition:\n if partition.HasField(\"row_length\"):\n try:\n field_path = parent_path.child(partition.row_length)\n # To avoid loop undefined variable lint error.\n partition_type = arrow_schema.field(field_path.initial_step()).type\n for partition_type in _EnumerateTypesAlongPath(\n arrow_schema, field_path, stop_at_path_end=True):\n # Iterate through them all. Only interested on the last type.\n pass\n if not _IsListLike(partition_type) or not pa.types.is_integer(\n partition_type.value_type):\n return False\n except ValueError:\n # ValueError signifies wrong column name / field name requested.\n return False\n\n elif partition.HasField(\"uniform_row_length\"):\n if partition.uniform_row_length <= 0:\n return False\n else:\n return False\n\n # All checks passed successfully.\n return True",
"def init_tensors(self, sample, *args):\n raise NotImplementedError",
"def __init__(self, data):\n if isinstance(data, np.ndarray):\n data = nd.array(data)\n\n self.__init_handle_by_constructor__(\n _make.TensorValue, data)",
"def __init__(self, tensor, df):\n super().__init__()\n self.tensor = tensor\n self.df = df",
"def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: GraphQLSchema = schema",
"def __init__(self, reader_schema, writer_schema=None, input_file=None):\n\n if writer_schema is None:\n writer_schema = reader_schema\n self._reader_schema = reader_schema\n self._writer_schema = writer_schema\n self._reader_schema_json = json.loads(str(self._reader_schema))\n self._writer_schema_json = json.loads(str(self._writer_schema))\n self._input_file = input_file\n self._set_avro_readers()",
"def CanHandle(arrow_schema: pa.Schema,\n tensor_representation: schema_pb2.TensorRepresentation) -> bool:\n sparse_representation = tensor_representation.sparse_tensor\n if (len(sparse_representation.dense_shape.dim) != len(\n sparse_representation.index_column_names)):\n return False\n\n # All the index columns must be of integral types.\n for index_column in sparse_representation.index_column_names:\n depth, value_type = _GetNestDepthAndValueType(\n arrow_schema, path.ColumnPath(index_column))\n if depth != 1 or not pa.types.is_integer(value_type):\n return False\n\n depth, value_type = _GetNestDepthAndValueType(\n arrow_schema, path.ColumnPath(sparse_representation.value_column_name))\n return depth == 1 and _IsSupportedArrowValueType(value_type)",
"def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False",
"def __init__(self, pretransformed_input, transform_fn, dtype=None,\n shape=NONE_SPECIFIED, also_track=None, name=None):\n pretransformed_input = tensor_util.convert_nonref_to_tensor(\n pretransformed_input,\n name='pretransformed_input')\n\n if dtype is None:\n dtype = (getattr(transform_fn, 'dtype', None) or\n dtype_util.base_dtype(pretransformed_input.dtype))\n try:\n dtype = None if dtype is None else tf.as_dtype(dtype)\n except TypeError:\n raise TypeError('Argument `dtype` must be convertible to a '\n '`tf.dtypes.DType`; saw \"{}\" of type \"{}\".'.format(\n repr(dtype), type(dtype)))\n\n if shape == NONE_SPECIFIED:\n shape = getattr(transform_fn, 'forward_event_shape', _identity)\n shape = shape(pretransformed_input.shape)\n try:\n shape = tf.TensorShape(shape)\n except TypeError:\n raise TypeError('Argument `shape` must be convertible to a '\n '`tf.TensorShape`; saw \"{}\".'.format(shape))\n\n name = name or getattr(transform_fn, 'name', None)\n if not name:\n name = '_'.join([\n transform_fn.__name__,\n getattr(pretransformed_input, 'name', '')])\n name = name_util.strip_invalid_chars(name)\n name = name_util.camel_to_lower_snake(name)\n name = name_util.get_name_scope_name(name)\n name = name_util.strip_invalid_chars(name)\n\n if hasattr(transform_fn, 'forward'):\n fwd_name = '\"{}\"'.format(transform_fn.name)\n else:\n fwd_name = transform_fn.__name__\n if not callable(transform_fn):\n raise TypeError('Argument `transform_fn` must be `callable`.')\n\n super(DeferredTensor, self).__init__(name=name)\n self._pretransformed_input = pretransformed_input\n self._transform_fn = transform_fn\n self._dtype = dtype\n self._shape = shape\n self._also_track = also_track\n self._name = name\n self._fwd_name = fwd_name\n\n # Secret handshake with tf.is_tensor to return True for DT.\n #\n # Works around `tf.get_static_value` not returning `None`.\n # With this, `tf.get_static_value` returns `None`, and without\n # this returns the `DeferredTensor` object.\n # TODO(b/140157055): Remove this shim after LinOp is patched in 2.0.\n self.is_tensor_like = True",
"def __init__(self, shape, dtype='float32'):\n if not isinstance(shape, (tuple, list)):\n raise TypeError('shape must be a tuple or list: %s' % str(shape))\n self._type_shape = loom.TypeShape(dtype, shape)",
"def __init__(self, shape):\n\n self.shape = shape",
"def initialize(self, tup):\n self.set_io_dims(tup)",
"def __init__(self, schema=None):\n self.schema = schema or {}",
"def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0",
"def __init__(self, brain_spec, hparams):\n self._brain_spec = brain_spec\n self._validate_spec()\n self._hparams = hparams\n super().__init__(input_tensor_spec=brain_spec.observation_spec.tfa_spec)",
"def __post_init__(self):\n assert self.data.shape == self.theory.shape",
"def __init__(self, input_tensor_spec):\n self._input_tensor_spec = input_tensor_spec\n super().__init__()",
"def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False",
"def ArrowSchema(self) -> pa.Schema:",
"def initialize(self, embedding_fn: EmbeddingFn, inputs: Optional[torch.Tensor], sequence_length: Optional[torch.LongTensor]) ->HelperInitTuple:\n raise NotImplementedError",
"def _init(self):\n raise NotImplementedError",
"def __post_init__(self) -> None:\n if get_origin(self.type) is Union:\n args = get_args(self.type)\n nonnull = tuple(a for a in args if a is not type(None))\n if len(nonnull) < len(args):\n # object.__setattr__ because we are using a frozen dataclass\n object.__setattr__(self, \"_original_annotation\", self.type)\n object.__setattr__(self, \"type\", Union[nonnull])\n object.__setattr__(self, \"nullable\", True)",
"def initialize(self):\n raise NotImplementedError",
"def initialize(self):\n raise NotImplementedError",
"def initialize(self):\n raise NotImplementedError",
"def __init__(__self__, *,\n endpoint_type: pulumi.Input[str],\n blob_container_name: Optional[pulumi.Input[str]] = None,\n resource_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"endpoint_type\", 'StorageBlob')\n if blob_container_name is not None:\n pulumi.set(__self__, \"blob_container_name\", blob_container_name)\n if resource_id is not None:\n pulumi.set(__self__, \"resource_id\", resource_id)",
"def __init__(self, *handlers):\n if (len(handlers) == 1) and (type(handlers[0]) in SequenceTypes):\n handlers = handlers[0]\n self.handlers = handlers\n self.set_validate()",
"def __init__(\n self,\n feature_set: Tensor,\n transform_on_train: bool = False,\n transform_on_eval: bool = True,\n transform_on_fantasize: bool = False,\n ) -> None:\n super().__init__()\n if feature_set.dim() != 2:\n raise ValueError(\"`feature_set` must be an `n_f x d_f`-dim tensor!\")\n self.register_buffer(\"feature_set\", feature_set)\n self.transform_on_train = transform_on_train\n self.transform_on_eval = transform_on_eval\n self.transform_on_fantasize = transform_on_fantasize"
]
| [
"0.6957694",
"0.58774436",
"0.5753732",
"0.57403076",
"0.55515134",
"0.55478054",
"0.54788816",
"0.5473977",
"0.54100204",
"0.5403191",
"0.5394013",
"0.5376588",
"0.5372768",
"0.53579617",
"0.53541607",
"0.5344507",
"0.53393805",
"0.5317258",
"0.5312363",
"0.5267516",
"0.52650976",
"0.5192161",
"0.5190485",
"0.5160386",
"0.51599115",
"0.51599115",
"0.51599115",
"0.5133029",
"0.5132372",
"0.5119526"
]
| 0.75617373 | 0 |
Returns the TypeSpec of the converted Tensor or CompositeTensor. | def type_spec(self) -> tf.TypeSpec:
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_spec_type(self):\r\n return self._spec_type",
"def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._type_specs",
"def get_tf_dtype(dtype): # pylint: disable=too-many-return-statements\n if dtype in {'float', 'float32', 'tf.float32', float,\n np.float32, tf.float32}:\n return tf.float32\n elif dtype in {'float64', 'tf.float64', np.float64, np.float_, tf.float64}:\n return tf.float64\n elif dtype in {'float16', 'tf.float16', np.float16, tf.float16}:\n return tf.float16\n elif dtype in {'int', 'int32', 'tf.int32', int, np.int32, tf.int32}:\n return tf.int32\n elif dtype in {'int64', 'tf.int64', np.int64, tf.int64}:\n return tf.int64\n elif dtype in {'int16', 'tf.int16', np.int16, tf.int16}:\n return tf.int16\n elif dtype in {'bool', 'tf.bool', bool, np.bool_, tf.bool}:\n return tf.bool\n elif dtype in {'string', 'str', 'tf.string', str, np.str, tf.string}:\n return tf.string\n try:\n if dtype == {'unicode', unicode}:\n return tf.string\n except NameError:\n pass\n\n raise ValueError(\n \"Unsupported conversion from type {} to tf dtype\".format(str(dtype)))",
"def type(self) -> np.dtype:\n return self._tensorInfo.dtype",
"def get_c_type(self, tensor_type):\n try:\n from tflite.TensorType import TensorType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n if tensor_type == TensorType.UINT8:\n return (\"uint8_t\", 1)\n if tensor_type == TensorType.FLOAT32:\n return (\"float\", 4)\n if tensor_type == TensorType.INT32:\n return (\"int32_t\", 4)\n raise NotImplementedError(\"Tensor type {} is currently not supported\"\n .format(str(tensor_type)))",
"def kind(self):\n # type () -> str\n return np.dtype(self.type).kind",
"def feature_spec(self):\n if not self.infer_without_label:\n feature_shapes = [(tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]),\n tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]))]\n feature_shapes.append(tf.TensorShape([tf.Dimension(None)]))\n else:\n feature_shapes = [(tf.TensorShape([tf.Dimension(None)]), tf.TensorShape([]))\n ]\n if len(feature_shapes) == 1:\n return feature_shapes[0]\n return tuple(feature_shapes)",
"def get_data_type(params):\n return tf.float16 if params.use_fp16 else tf.float32",
"def GetFillerType(tensor):\n return _C.GetFillerType(_stringify_tensor(tensor))",
"def data_type_spec(self) -> Optional[pulumi.Input['AssetModelDataTypeSpec']]:\n return pulumi.get(self, \"data_type_spec\")",
"def is_tensorflow_compatible_type(type_spec):\n if type_spec is None:\n return True\n\n def _predicate(type_spec: computation_types.Type) -> bool:\n return isinstance(\n type_spec,\n (\n computation_types.SequenceType,\n computation_types.StructType,\n computation_types.TensorType,\n ),\n )\n\n return contains_only(type_spec, _predicate)",
"def dtype_specs(self):\r\n #TODO: add more type correspondances for e.g. int32, int64, float32,\r\n #complex64, etc.\r\n try:\r\n return {'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\r\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\r\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\r\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\r\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\r\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\r\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\r\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\r\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\r\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\r\n 'complex128': (complex, 'theano_complex128',\r\n 'NPY_COMPLEX128'),\r\n 'complex64': (complex, 'theano_complex64',\r\n 'NPY_COMPLEX64')}[self.dtype]\r\n except KeyError:\r\n raise TypeError(\"Unsupported dtype for %s: %s\" % (\r\n self.__class__.__name__, self.dtype))",
"def dtype_specs(self):\r\n # TODO: add more type correspondances for e.g. int32, int64, float32,\r\n # complex64, etc.\r\n try:\r\n return {\r\n 'float32': (float, 'npy_float32', 'NPY_FLOAT32'),\r\n 'float64': (float, 'npy_float64', 'NPY_FLOAT64'),\r\n 'uint8': (int, 'npy_uint8', 'NPY_UINT8'),\r\n 'int8': (int, 'npy_int8', 'NPY_INT8'),\r\n 'uint16': (int, 'npy_uint16', 'NPY_UINT16'),\r\n 'int16': (int, 'npy_int16', 'NPY_INT16'),\r\n 'uint32': (int, 'npy_uint32', 'NPY_UINT32'),\r\n 'int32': (int, 'npy_int32', 'NPY_INT32'),\r\n 'uint64': (int, 'npy_uint64', 'NPY_UINT64'),\r\n 'int64': (int, 'npy_int64', 'NPY_INT64'),\r\n 'complex128': (complex, 'theano_complex128', 'NPY_COMPLEX128'),\r\n 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')\r\n }[self.dtype]\r\n except KeyError:\r\n raise TypeError(\"Unsupported dtype for %s: %s\"\r\n % (self.__class__.__name__, self.dtype))",
"def OriginalTypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._original_type_specs",
"def tensor_type(type_str):\n return mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(type_str.lower())]",
"def get_type(self):\n\n return self.scalertype",
"def get_data_types():\n return tf.float32, tf.float32, tf.int32",
"def dataType(self, data):\n if isinstance(data,str):\n return STRING\n elif isinstance(data,dict):\n return ASSOC\n elif isinstance(data,int) or isinstance(data,float):\n return STRING\n elif is_python2() and isinstance(data,long):\n return STRING\n elif isinstance(data, SpecArray.SpecArrayData):\n self.rows, self.cols = data.shape\n return data.type",
"def dtype(self) -> tf.dtypes.DType:",
"def GetFillerType(tensor):\n return GetFillerTypeCC(_stringify_tensor(tensor))",
"def __Type(thing):\n from .native import (Vector, Color, Matrix, VectorField, ColorField,\n MatrixField, ScalarField)\n if isinstance(thing, (int, float, ScalarField)): return 'Scalar'\n if isinstance(thing, (Vector, VectorField)): return 'Vector'\n if isinstance(thing, (Color, ColorField)): return 'Color'\n if isinstance(thing, (Matrix, MatrixField)): return 'Matrix'\n raise ValueError(\"Could not guess type of: \" + thing)",
"def kind(self):\r\n return TypeKind.from_id(self._kind_id)",
"def visitTensorType(self, ctx):\n # type: (RelayParser.TensorTypeContext) -> ty.TensorType\n\n shape = self.visit(ctx.shapeSeq())\n dtype = self.visit(ctx.type_())\n\n if not isinstance(dtype, ty.TensorType):\n raise ParseError(\"Expected dtype to be a Relay base type.\")\n\n dtype = dtype.dtype\n\n return ty.TensorType(shape, dtype)",
"def data_type():\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32",
"def data_type():\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32",
"def numpy_types(self) -> List[np.dtype]:\n if self.is_tensor_spec():\n return [x.type for x in self.inputs]\n return [x.type.to_numpy() for x in self.inputs]",
"def dtype_type( dtype, name = None ):\n if name:\n for property in dtype.descr:\n if property[ 0 ] == name:\n return property[ 1 ]\n raise ValueError( \"Property not found\" )\n else:\n if len( dtype.descr ) > 1:\n raise ValueError( \"Multiple types present\" )\n\n return dtype.descr[ 0 ][ 1 ]",
"def get_dtype(arr, backend='autograd'):\n if backend == 'pytorch':\n return pytorch_dtype_query_mapping_dict[arr.dtype]\n elif backend == 'autograd':\n return str(arr.dtype)",
"def get_param_type(value):\n dtype = type(value).__name__\n if dtype not in ['str', 'float', 'int', 'bool', 'list']:\n dtype = 'str'\n if dtype == 'list':\n try:\n eltype = type(value[0]).__name__\n except IndexError:\n eltype = 'str'\n if eltype not in ['str', 'float', 'int', 'bool']:\n eltype = 'str'\n dtype = eltype + dtype\n return dtype",
"def dtype_to_type(dtype) -> Type:\n if dtype == np.object:\n return str\n else:\n return type(np.zeros(1, dtype).item())"
]
| [
"0.6116064",
"0.60049427",
"0.597106",
"0.56536496",
"0.56475276",
"0.55525804",
"0.55150294",
"0.549156",
"0.5469775",
"0.54503685",
"0.5421005",
"0.54045445",
"0.53997046",
"0.53790647",
"0.53731406",
"0.5366895",
"0.5364541",
"0.5344615",
"0.53302383",
"0.53137934",
"0.5300647",
"0.52954733",
"0.52831477",
"0.52523994",
"0.52265424",
"0.5221329",
"0.5219971",
"0.5212214",
"0.52034545",
"0.5193494"
]
| 0.68329406 | 0 |
Converts the RecordBatch to Tensor or CompositeTensor. The result must be of the same (not only compatible) TypeSpec as self.type_spec. | def GetTensor(self, record_batch: pa.RecordBatch,
produce_eager_tensors: bool) -> Any: | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __call__(self, batch: iter) -> torch.Tensor or list(torch.Tensor):\n if torch.is_tensor(batch[0]):\n return torch.cat(tuple(t.unsqueeze(0) for t in batch), 0).view(-1, self.batch_size, *batch[0].size())\n elif isinstance(batch[0], int):\n return torch.LongTensor(batch).view(-1, self.batch_size)\n elif isinstance(batch[0], collections.Iterable):\n # if each batch element is not a tensor, then it should be a tuple\n # of tensors; in that case we collate each element in the tuple\n transposed = zip(*batch)\n return tuple(self.__call__(samples) for samples in transposed)\n\n raise TypeError((\"batch must contain tensors, numbers, or lists; found {}\"\n .format(type(batch[0]))))",
"def ToBatchTensors(\n self,\n record_batch: pa.RecordBatch,\n produce_eager_tensors: Optional[bool] = None) -> Dict[str, Any]:\n\n tf_executing_eagerly = tf.executing_eagerly()\n if produce_eager_tensors and not tf_executing_eagerly:\n raise RuntimeError(\n \"Eager Tensors were requested but eager mode was not enabled.\")\n if produce_eager_tensors is None:\n produce_eager_tensors = tf_executing_eagerly\n\n if not record_batch.schema.equals(self._arrow_schema):\n raise ValueError(\"Expected same schema.\")\n result = {}\n for tensor_name, handler in self._type_handlers:\n try:\n result[tensor_name] = handler.GetTensor(record_batch,\n produce_eager_tensors)\n except Exception as e:\n raise ValueError(\n \"Error raised when handling tensor '{}'\".format(tensor_name)) from e\n\n return result",
"def batch_to_device(batch):\n for key in batch:\n if isinstance(batch[key], torch.Tensor):\n batch[key] = batch[key].to(device)\n return batch",
"def _batch_to_device(batch, target_device):\n tensor = _getattr(\"torch\", \"Tensor\")\n for key in batch:\n if isinstance(batch[key], tensor):\n batch[key] = batch[key].to(target_device)\n return batch",
"def _batch(self, batch_size):\n transform_or_spec = self._specs.get(\n 'transform_or_spec', self.transform_or_spec)\n if hasattr(transform_or_spec, '_batch'):\n transform_or_spec = transform_or_spec._batch(batch_size)\n return _DeferredTensorSpec(\n self._get_batched_input_spec(batch_size),\n transform_or_spec=transform_or_spec,\n dtype=self.dtype,\n shape=(None if self.shape is None\n else tf.TensorShape([batch_size]).concatenate(self.shape)),\n name=self.name,\n also_track_spec=self._also_track_spec)",
"def list_to_backend_type(data: List) -> TTensor:",
"def transform_batch(self, the_batch):\n return (\n arg.to(self.device) if isinstance(arg, torch.Tensor) else arg\n for arg in the_batch\n )",
"def embed_batch(self, batch: batches.TFBatch) -> tf.Tensor:\n data = batches.batch_to_components(batch, self._config.context_features,\n self._config.sequential_features)\n return self.embed_data(data)",
"def batch_input(self, type_shape, batch_idx):\n try:\n ts_idx = self._loom._type_shape_to_idx[type_shape]\n except KeyError:\n raise TypeError('Constant is not of a recognized TypeShape: %s' %\n str(type_shape))\n batch_input = self._weaver.BatchInput(ts_idx, batch_idx)\n if batch_input == -1:\n raise AssertionError('Weaver Batch Input creation failed: %s' %\n self._weaver.error_string())\n return batch_input",
"def visitTensorType(self, ctx):\n # type: (RelayParser.TensorTypeContext) -> ty.TensorType\n\n shape = self.visit(ctx.shapeSeq())\n dtype = self.visit(ctx.type_())\n\n if not isinstance(dtype, ty.TensorType):\n raise ParseError(\"Expected dtype to be a Relay base type.\")\n\n dtype = dtype.dtype\n\n return ty.TensorType(shape, dtype)",
"def from_numpy_to_tensor(record, device_id):\n [X, A,\n mol_ids_rep, rep_ids_rep, iw_ids,\n last_append_mask,\n NX, NX_rep,\n action_0, actions,\n log_p] = record\n\n X = nd.array(X, ctx=mx.gpu(device_id), dtype='int32')\n A_sparse = []\n for A_i in A:\n if A_i.shape[0] == 0:\n A_sparse.append(None)\n else:\n # transpose may not be supported in gpu\n A_i = np.concatenate([A_i, A_i[:, [1, 0]]], axis=0)\n\n # construct csr matrix ...\n data = np.ones((A_i.shape[0], ), dtype=np.float32)\n row, col = A_i[:, 0], A_i[:, 1]\n A_sparse_i = nd.sparse.csr_matrix((data, (row, col)),\n shape=tuple([int(X.shape[0]), ]*2),\n ctx=mx.gpu(device_id),\n dtype='float32')\n\n # append to list\n A_sparse.append(A_sparse_i)\n\n batch_size, iw_size = np.asscalar(mol_ids_rep.max() + 1), \\\n np.asscalar(rep_ids_rep.max() + 1)\n\n mol_ids_rep, rep_ids_rep, iw_ids, \\\n last_append_mask, \\\n NX, NX_rep, action_0, actions = [nd.array(_x, ctx=mx.gpu(device_id), dtype='int32')\n for _x in [mol_ids_rep, rep_ids_rep, iw_ids,\n last_append_mask,\n NX, NX_rep, action_0, actions]]\n\n log_p = nd.array(log_p, ctx=mx.gpu(device_id), dtype='float32')\n\n record = [X, A_sparse, iw_ids, last_append_mask,\n NX, NX_rep, action_0, actions, log_p,\n batch_size, iw_size]\n\n\n return record",
"def embed(self, batch: Union[List[List[int]], List[List[str]]]) -> Tuple[FloatTensor, LongTensor]:\n pass",
"def consume_tfrecord(is_training=True, batch_size=32):\n if is_training:\n dataset = tf.data.TFRecordDataset(tfrecord_file_training)\n else:\n dataset = tf.data.TFRecordDataset(tfrecord_file_eval)\n\n dataset = dataset.map(tfrecord_utils.parse)\n\n if is_training:\n dataset = dataset.map(distorted_input)\n dataset = dataset.repeat()\n dataset = dataset.shuffle(buffer_size=2560)\n else:\n dataset = dataset.map(norm_input)\n\n dataset = dataset.padded_batch(batch_size, padded_shapes=([FLAGS.image_size, FLAGS.image_size, 3], []))\n\n iterator = dataset.make_one_shot_iterator()\n\n return iterator",
"def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])",
"def process_batch_input_for_RNN(self, batch_input):\n batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])\n x = tf.transpose(batch_input_)\n return x",
"def generate_batch(\n batch: Tuple[Dict[str, Sequence[int]], List[Sequence[int]]]\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n input_ids = torch.tensor([b[0][\"input_ids\"] for b in batch])\n attention_mask = torch.tensor([b[0][\"attention_mask\"] for b in batch])\n token_type_ids = torch.tensor([b[0][\"token_type_ids\"] for b in batch])\n labels = torch.tensor([b[1] for b in batch])\n features = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n return features, labels",
"def train_batch(\n self, batch: TorchData, model: nn.Module, epoch_idx: int, batch_idx: int\n ) -> Union[torch.Tensor, Dict[str, Any]]:\n pass",
"def conv_batchify(self, batch):\n batch_roles = []\n batch_context_tokens = []\n batch_response = []\n\n for conv_dict in batch:\n batch_roles.append(0 if conv_dict['role'] == 'Seeker' else 1)\n context_tokens = [utter + [self.conv_bos_id] for utter in conv_dict['context_tokens']]\n context_tokens[-1] = context_tokens[-1][:-1]\n batch_context_tokens.append(\n truncate(merge_utt(context_tokens), max_length=self.context_truncate, truncate_tail=False),\n )\n batch_response.append(\n add_start_end_token_idx(\n truncate(conv_dict['response'], max_length=self.response_truncate - 2),\n start_token_idx=self.start_token_idx,\n end_token_idx=self.end_token_idx\n )\n )\n\n batch_context_tokens = padded_tensor(items=batch_context_tokens,\n pad_idx=self.pad_token_idx,\n max_len=self.context_truncate,\n pad_tail=False)\n batch_response = padded_tensor(batch_response,\n pad_idx=self.pad_token_idx,\n max_len=self.response_truncate,\n pad_tail=True)\n batch_input_ids = torch.cat((batch_context_tokens, batch_response), dim=1)\n batch_roles = torch.tensor(batch_roles)\n\n return (batch_roles,\n batch_input_ids,\n batch_context_tokens,\n batch_response)",
"def convert_to_tf_record(_):\n\n mnist = input_data.read_data_sets(\n \"/tmp/tensorflow/mnist/input_data\",\n reshape=False\n )\n\n convert_to(mnist.validation, 'validation', FLAGS.data_directory)\n convert_to(mnist.train, 'train', FLAGS.data_directory, num_shards=10)\n convert_to(mnist.test, 'test', FLAGS.data_directory)",
"def _unbatch(self):\n transform_or_spec = self._specs.get(\n 'transform_or_spec', self.transform_or_spec)\n if hasattr(transform_or_spec, '_unbatch'):\n transform_or_spec = transform_or_spec._unbatch()\n return _DeferredTensorSpec(\n self._get_unbatched_input_spec(),\n transform_or_spec=transform_or_spec,\n dtype=self.dtype,\n shape=(None if self.shape is None else self.shape[1:]),\n name=self.name,\n also_track_spec=self._also_track_spec)",
"def to_tfrecord(data_blob):\n\n id = np.array(data_blob['id'], dtype=np.int32).tobytes()\n dim = np.array(data_blob['images'].shape, dtype=np.int32).tobytes()\n\n images = np.array(data_blob['images'], dtype=np.uint8).tobytes()\n poses = np.array(data_blob['poses'], dtype=np.float32).tobytes()\n depth = np.array(data_blob['depth'], dtype=np.float32).tobytes()\n filled = np.array(data_blob['filled'], dtype=np.float32).tobytes()\n intrinsics = np.array(data_blob['intrinsics'], dtype=np.float32).tobytes()\n\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[id])),\n 'dim': tf.train.Feature(bytes_list=tf.train.BytesList(value=[dim])),\n 'images': tf.train.Feature(bytes_list=tf.train.BytesList(value=[images])),\n 'poses': tf.train.Feature(bytes_list=tf.train.BytesList(value=[poses])),\n 'depth': tf.train.Feature(bytes_list=tf.train.BytesList(value=[depth])),\n 'filled': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filled])),\n 'intrinsics': tf.train.Feature(bytes_list=tf.train.BytesList(value=[intrinsics])),\n }))\n\n return example",
"def process_batch_input_for_RNN(batch_input):\n batch_input_ = tf.transpose(batch_input, perm=[2, 0, 1])\n X = tf.transpose(batch_input_)\n\n return X",
"def __call__(self, shape, dtype=None):\n\n rank = len(shape)\n assert rank in [1,2], 'LogReg can only be used to initialize Dense-like kernels & biases' \n\n n_class, n_feat = self.model.coef_.shape\n\n if rank == 1:\n assert shape == (n_class,), '1D `shape` should match LogReg.model.intercept_'\n return tf.convert_to_tensor(self.model.intercept_.astype(np.float32), dtype=dtype)\n\n elif rank == 2:\n assert shape == (n_feat, n_class), '2D `shape` should match LogReg.model.coef_.T'\n return tf.convert_to_tensor(self.model.coef_.T.astype(np.float32), dtype=dtype)",
"def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:",
"def _decode_record(record):\r\n example = tf.io.parse_single_example(serialized=record, features=feature_description)\r\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\r\n # So cast all int64 to int32.\r\n for key in [k for k in example.keys() if k not in ['example_id', 'unique_ids']]:\r\n example[key] = tf.cast(example[key], dtype=tf.int32)\r\n if is_training:\r\n features = {\r\n 'input_ids': example['input_ids'],\r\n 'input_mask': example['input_mask'],\r\n 'segment_ids': example['segment_ids']\r\n }\r\n labels = {\r\n 'start_logits_or_probs': tf.one_hot(example['start_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'end_logits_or_probs': tf.one_hot(example['end_positions'],\r\n depth=seq_length, dtype=tf.float32),\r\n 'ans_type': tf.one_hot(example['answer_types'],\r\n depth=len(ANSWER_TYPE_ORDER), dtype=tf.float32)\r\n }\r\n return (features, labels)\r\n else:\r\n return example",
"def collate_without_batching_dict(batch):\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.Mapping):\n return [d for d in batch]\n # return {key: collate_without_batching_dict_list([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [collate_without_batching_dict(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))",
"def _get_batch_data(self, batch):\n try:\n encoders = [ encoder for encoder in self._data_encoder ]\n except:\n encoders = (self._data_encoder,)\n\n try:\n data_batches = [ encoder.transform_batch(rec for _, rec in batch.iterrows())\n for encoder in encoders ]\n except AttributeError:\n data_batches = [\n [ self._get_data(record, encoder) for _, record in batch.iterrows() ]\n for encoder in encoders ]\n\n try:\n batches = [ np.array(encoder.finalize_batch(batch))\n for encoder, batch in zip(encoders, data_batches)]\n except AttributeError:\n batches = [ np.array(batch) for batch in data_batches ]\n\n return batches if len(batches) > 1 else batches[0]",
"def to_torch(batch, **kwargs):\n x = torch.from_numpy(np.array(batch, dtype='float32'))\n return x.view(*x.size()[:2], -1).permute(2, 0, 1)",
"def cast_from_tensor_class(self, results: List[BackendEagerTensor]\n ) -> Union[Any, Tuple[Any]]:\n if isinstance(results, (tuple, list)):\n if len(results) == 1:\n return results[0].value\n return tuple(r.value for r in results)\n return results.value",
"def __call__(self, batch: List[List[int]], **kwargs) -> Union[List[List[np.ndarray]], List[np.ndarray]]:\n one_hotted_batch = []\n\n for utt in batch:\n if isinstance(utt, Iterable):\n one_hotted_utt = self._to_one_hot(utt, self._depth)\n elif isinstance(utt, int):\n if self._pad_zeros or self.single_vector:\n one_hotted_utt = self._to_one_hot([utt], self._depth)\n else:\n one_hotted_utt = self._to_one_hot([utt], self._depth).reshape(-1)\n\n if self.single_vector:\n one_hotted_utt = np.sum(one_hotted_utt, axis=0)\n\n one_hotted_batch.append(one_hotted_utt)\n\n if self._pad_zeros:\n one_hotted_batch = zero_pad(one_hotted_batch)\n return one_hotted_batch"
]
| [
"0.63535535",
"0.60607845",
"0.5949915",
"0.5802738",
"0.5792601",
"0.5786943",
"0.5721988",
"0.5646653",
"0.5506841",
"0.5362173",
"0.5297846",
"0.5296241",
"0.52926576",
"0.5283197",
"0.5271365",
"0.5257141",
"0.52229846",
"0.52151346",
"0.5209855",
"0.5195071",
"0.51758224",
"0.5170469",
"0.51465285",
"0.5139739",
"0.51376873",
"0.513386",
"0.51118773",
"0.5111145",
"0.51056004",
"0.5094463"
]
| 0.6129063 | 1 |
Converts a ListArray to a dense tensor. | def _ListArrayToTensor(
self, list_array: pa.Array,
produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]:
values = list_array.flatten()
batch_size = len(list_array)
expected_num_elements = batch_size * self._unbatched_flat_len
if len(values) != expected_num_elements:
raise ValueError(
"Unable to convert a {} to a tensor of type spec {}: size mismatch. "
"Expected {} elements but got {}. "
"If your data type is tf.Example, make sure that the feature "
"is always present, and have the same length in all the examples. "
"TFX users should make sure there is no data anomaly for the feature."
.format(
type(list_array), self.type_spec, expected_num_elements,
len(values)))
actual_shape = list(self._shape)
actual_shape[0] = batch_size
if self._convert_to_binary_fn is not None:
values = self._convert_to_binary_fn(values)
values_np = np.asarray(values).reshape(actual_shape)
if produce_eager_tensors:
return tf.convert_to_tensor(values_np)
return values_np | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sparse_to_dense(self, tensor: tf.Tensor, output_shape: tf.TensorShape) -> tf.Tensor:\n return tf.scatter_nd(self.observations_index, tensor, output_shape)",
"def dense_to_sparse(self, tensor: tf.Tensor) -> tf.Tensor:\n tensor_shape = tensor.shape\n expand_dims = len(tensor_shape) == 3\n\n tensor = tf.gather_nd(tf.reshape(tensor, (-1, 1)), self.observations_index)\n if expand_dims:\n tensor = tf.expand_dims(tensor, axis=-1)\n return tensor",
"def to_dense(tensor):\n if is_sparse(tensor):\n return sparse_ops.sparse_tensor_to_dense(tensor)\n else:\n return tensor",
"def list_to_tensor(data):\n return torch.as_tensor(data, dtype=torch.float32, device=global_device())",
"def np2tensor(array, device=None):\n tensor = torch.from_numpy(array)\n return tensor",
"def _from_numpy(array):\n return tf.constant(array)",
"def list_to_backend_type(data: List) -> TTensor:",
"def tt(ndarray):\n\n\tif not isinstance(ndarray, torch.Tensor):\n\n\t\tif not isinstance(ndarray, np.ndarray):\n\t\t\tndarray = np.array(ndarray)\n\n\t\tif torch.cuda.is_available():\n\t\t\tndarray = Variable(torch.from_numpy(ndarray).float().cuda(), requires_grad=False)\n\t\telse:\n\t\t\tndarray = Variable(torch.from_numpy(ndarray).float(), requires_grad=False)\n\n\treturn ndarray",
"def todense(self):\n d = np.zeros(self.shape)\n for index,value in zip(self.index, self.value):\n d[index] = value\n return d",
"def np2tensor(array, device_id=-1):\n tensor = torch.from_numpy(array)\n if device_id >= 0:\n tensor = tensor.cuda(device_id)\n return tensor",
"def list_to_tensor(liste) -> torch.Tensor:\n if isinstance(liste,torch.Tensor):\n return liste\n bs = len(liste)\n shape = liste[0].shape\n final_shape = (bs,*shape)\n tensor_eq = torch.empty(final_shape)\n for k in range(bs):\n tensor_eq[k] = liste[k]\n return tensor_eq",
"def feed_ndarray(dali_tensor, arr):\n assert dali_tensor.shape() == list(arr.size()), \\\n (\"Shapes do not match: DALI tensor has size {0}\"\n \", but PyTorch Tensor has size {1}\".format(dali_tensor.shape(), list(arr.size())))\n #turn raw int to a c void pointer\n c_type_pointer = ctypes.c_void_p(arr.data_ptr())\n dali_tensor.copy_to_external(c_type_pointer)\n return arr",
"def to_tvm_ndarray(a: List[np.ndarray]) -> List[tvm.nd.NDArray]:\n assert a is not None, \"Empty result cannot be converted to TVM NDArray\"\n return [tvm.nd.array(x) for x in a]",
"def np_to_torch(array):\n tensor = torch.from_numpy(array)\n if tensor.dtype != torch.float32:\n tensor = tensor.float()\n return tensor",
"def from_numpy_to_tensor(record, device_id):\n [X, A,\n mol_ids_rep, rep_ids_rep, iw_ids,\n last_append_mask,\n NX, NX_rep,\n action_0, actions,\n log_p] = record\n\n X = nd.array(X, ctx=mx.gpu(device_id), dtype='int32')\n A_sparse = []\n for A_i in A:\n if A_i.shape[0] == 0:\n A_sparse.append(None)\n else:\n # transpose may not be supported in gpu\n A_i = np.concatenate([A_i, A_i[:, [1, 0]]], axis=0)\n\n # construct csr matrix ...\n data = np.ones((A_i.shape[0], ), dtype=np.float32)\n row, col = A_i[:, 0], A_i[:, 1]\n A_sparse_i = nd.sparse.csr_matrix((data, (row, col)),\n shape=tuple([int(X.shape[0]), ]*2),\n ctx=mx.gpu(device_id),\n dtype='float32')\n\n # append to list\n A_sparse.append(A_sparse_i)\n\n batch_size, iw_size = np.asscalar(mol_ids_rep.max() + 1), \\\n np.asscalar(rep_ids_rep.max() + 1)\n\n mol_ids_rep, rep_ids_rep, iw_ids, \\\n last_append_mask, \\\n NX, NX_rep, action_0, actions = [nd.array(_x, ctx=mx.gpu(device_id), dtype='int32')\n for _x in [mol_ids_rep, rep_ids_rep, iw_ids,\n last_append_mask,\n NX, NX_rep, action_0, actions]]\n\n log_p = nd.array(log_p, ctx=mx.gpu(device_id), dtype='float32')\n\n record = [X, A_sparse, iw_ids, last_append_mask,\n NX, NX_rep, action_0, actions, log_p,\n batch_size, iw_size]\n\n\n return record",
"def from_dense_rows(cls,\n dense_rows: Iterable[np.ndarray]) -> 'VarLenTensorValue':\n rows = []\n index_arrays = []\n max_row_len = 0\n num_rows = 0\n for i, row in enumerate(dense_rows):\n num_rows += 1\n if row.size:\n if row.ndim <= 1:\n # Add a dimension for unsized numpy array. This will solve the problem\n # where scalar numpy arrays like np.array(None), np.array(0) can not\n # be merged with other numpy arrays.\n row = row.reshape(-1)\n rows.append(row)\n else:\n raise ValueError(\n 'Each non-empty dense row should be 1D or scalar but'\n f' found row with shape {row.shape}.'\n )\n index_arrays.append(np.array([[i, j] for j in range(len(row))]))\n max_row_len = max(max_row_len, row.size)\n if index_arrays:\n values = np.concatenate(rows, axis=0)\n indices = np.concatenate(index_arrays, axis=0)\n else:\n # empty case\n values = np.array([])\n indices = np.empty((0, 2))\n dense_shape = np.array([num_rows, max_row_len])\n return cls.__new__(\n cls, values=values, indices=indices, dense_shape=dense_shape)",
"def n2t(*arrays):\n data_logger.info('n2t({}:{}{})'\n .format(type(arrays).__name__, len(arrays),\n tuple(type(a).__name__ for a in arrays)))\n if len(arrays) == 1:\n return list(map(torch.tensor, (arrays[0],)))[0]\n return list(map(torch.tensor, arrays))",
"def expand_dim_for_tensor_list(tensor_list, dim_array):\n res_tensor_list = []\n for tensor in tensor_list:\n res_tensor = tensor\n for dim in dim_array:\n res_tensor = tf.expand_dims(res_tensor, dim)\n res_tensor_list.append(res_tensor)\n\n return res_tensor_list",
"def _to_dense(self: QGTOnTheFlyT) -> jnp.ndarray:\n Npars = nkjax.tree_size(self._params)\n I = jax.numpy.eye(Npars)\n\n if self._chunking:\n # the linear_call in mat_vec_chunked does currently not have a jax batching rule,\n # so it cannot be vmapped but we can use scan\n # which is better for reducing the memory consumption anyway\n _, out = jax.lax.scan(lambda _, x: (None, self @ x), None, I)\n else:\n out = jax.vmap(lambda x: self @ x, in_axes=0)(I)\n\n if jnp.iscomplexobj(out):\n out = out.T\n\n return out",
"def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)",
"def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)",
"def dense(self, input_tensor, units, name, activation='linear'):\n with tf.variable_scope('Dense', name):\n init_w = tf.truncated_normal(shape=(self.num_channels(input_tensor),\n units), dtype=tf.float32)\n w = tf.Variable(init_w)\n b = tf.Variable(tf.random_uniform(shape=(units, ), dtype=tf.float32))\n\n out_layer = tf.matmul(input_tensor, w) + b\n out_layer = self.activations[activation](out_layer)\n return out_layer",
"def dense_to_one_hot(label_dense,num_classes=2): # scalars 标量 一个one-hot向量除了某一位的数字是1以外其余各维度数字都是0\n num_labels = label_dense.shape[0]\n index_offset = np.arange(num_labels)*num_classes # idnex_offset该下标表表示的是一维时候每个labels的对应下标 arange一个参数时,参数值为终点,起点取默认值0,步长取默认值1\n labels_one_hot = np.zeros((num_labels,num_classes))\n labels_one_hot.flat[index_offset+label_dense.ravel()] = 1 # 对one_hot矩阵的指定的位置进行赋值1的操作 index_offset+labels_dense.ravel() 得到的是一个下标 flat属性返回的是一个array的遍历对象,此时它是一维形式的 ravel()返回的是一个副本,但是这个副本是原来数据的引用,有点类似于c++的引用。主要是减少存储空间的使用。返回的也是一个一维形式的数据\n return labels_one_hot",
"def _nd4j_datatype_from_np_array(array):\n return _nd4j_datatype_from_np(array.dtype.name)",
"def _dense_to_one_hot(self, labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot",
"def from_dense(cls, dense: Float[Array, \"N N\"]) -> \"ConstantDiagonalLinearOperator\":\n return ConstantDiagonalLinearOperator(\n value=jnp.atleast_1d(dense[0, 0]), size=dense.shape[0]\n )",
"def to_matrix(array):\n return Matrix(array.tolist())",
"def DenseToOneHot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot",
"def array2var(array):\n if array.ndim == 3:\n array = array.transpose((2, 0, 1))\n var = Variable(torch.from_numpy(array))\n while len(var.size()) < 4:\n var = var.unsqueeze(0)\n return var",
"def dense_to_one_hot(self, labels_dense, num_classes):\n return np.eye(num_classes)[labels_dense]"
]
| [
"0.6261433",
"0.60625476",
"0.5917142",
"0.58886254",
"0.5838281",
"0.57698417",
"0.5738672",
"0.56580937",
"0.5619259",
"0.55969185",
"0.5541283",
"0.5505446",
"0.54937375",
"0.5480769",
"0.5433032",
"0.54126364",
"0.53512746",
"0.5347285",
"0.53289104",
"0.53236985",
"0.53236985",
"0.5310624",
"0.53042346",
"0.52730286",
"0.52611583",
"0.52516866",
"0.52335584",
"0.5230986",
"0.5212557",
"0.5204194"
]
| 0.6430456 | 0 |
Builds type handlers according to TensorRepresentations. | def _BuildTypeHandlers(
tensor_representations: Dict[str, schema_pb2.TensorRepresentation],
arrow_schema: pa.Schema) -> List[Tuple[str, _TypeHandler]]:
result = []
for tensor_name, rep in tensor_representations.items():
potential_handlers = _TYPE_HANDLER_MAP.get(rep.WhichOneof("kind"))
if not potential_handlers:
raise ValueError("Unable to handle tensor {} with rep {}".format(
tensor_name, rep))
found_handler = False
for h in potential_handlers:
if h.CanHandle(arrow_schema, rep):
found_handler = True
result.append((tensor_name, h(arrow_schema, rep)))
break
if not found_handler:
raise ValueError("Unable to handle tensor {} with rep {} "
"against schema: {}".format(tensor_name, rep,
arrow_schema))
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def TensorRepresentations(self) -> tensor_adapter.TensorRepresentations:",
"def build(self, input_tensors, is_training, lengths=None, hparams=None):",
"def _build_tensor_info(tensor_dict):\n return {\n k: tf.compat.v1.saved_model.utils.build_tensor_info(t)\n for k, t in tensor_dict.items()\n }",
"def create_operands_and_descriptors(handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):\n # Create input tensor descriptors, output operands and output tensor descriptors\n output_class = wrapped_operands[0].__class__\n dtype_name = wrapped_operands[0].dtype\n\n # Compute extents for the outputs\n shared_mode_out = list(set(outputs[0]) & set(outputs[1]))[0]\n output_extents = [tuple(size_dict[m] if m != shared_mode_out else mid_extent for m in modes) for modes in outputs]\n \n logger.debug(\"Creating input tensor descriptors.\")\n input_tensor_descriptors = []\n output_tensor_descriptors = []\n try:\n for (t, modes) in zip(wrapped_operands, inputs):\n input_tensor_descriptors.append(t.create_tensor_descriptor(handle, modes))\n logger.debug(\"The input tensor descriptors have been created.\")\n # Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool. \n logger.debug(\"Beginning output tensors and descriptors creation...\")\n s = None\n s_ptr = 0\n output_operands = []\n with utils.device_ctx(device_id):\n for extent, tensor_modes in zip(output_extents, outputs):\n operand = utils.create_empty_tensor(output_class, extent, dtype_name, device_id, stream_ctx) \n output_operands.append(operand)\n output_tensor_descriptors.append(operand.create_tensor_descriptor(handle, tensor_modes))\n \n if hasattr(method, 'partition') and method.partition is None:\n if dtype_name in ['float32', 'complex64']:\n s_dtype_name = 'float32'\n elif dtype_name in ['float64', 'complex128']:\n s_dtype_name = 'float64'\n else:\n raise ValueError(f\"{dtype_name} data type not supported\")\n s = utils.create_empty_tensor(output_class, (mid_extent, ), s_dtype_name, device_id, stream_ctx)\n s_ptr = s.data_ptr\n logger.debug(\"The output tensors and descriptors have been created.\")\n except: \n _destroy_tensor_descriptors(input_tensor_descriptors)\n _destroy_tensor_descriptors(output_tensor_descriptors)\n raise\n\n return input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr",
"def _build_all_models(self):\r\n self.output_tensors = {}\r\n self.loss_terms = {}\r\n self.metrics = {}\r\n\r\n def _build_datasource_summaries(data_sources, mode):\r\n \"\"\"Register summary operations for input data from given data sources.\"\"\"\r\n with tf.variable_scope('%s_data' % mode):\r\n for data_source_name, data_source in data_sources.items():\r\n tensors = data_source.output_tensors\r\n for key, tensor in tensors.items():\r\n summary_name = '%s/%s' % (data_source_name, key)\r\n shape = tensor.shape.as_list()\r\n num_dims = len(shape)\r\n if num_dims == 4: # Image data\r\n if shape[1] == 1 or shape[1] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_first')\r\n elif shape[3] == 1 or shape[3] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_last')\r\n # TODO: fix issue with no summary otherwise\r\n elif num_dims == 2:\r\n self.summary.histogram(summary_name, tensor)\r\n else:\r\n logger.debug('I do not know how to create a summary for %s (%s)' %\r\n (summary_name, tensor.shape.as_list()))\r\n\r\n def _build_train_or_test(mode):\r\n data_sources = self._train_data if mode == 'train' else self._test_data\r\n\r\n # Build model\r\n output_tensors, loss_terms, metrics = self.build_model(data_sources, mode=mode)\r\n\r\n # Record important tensors\r\n self.output_tensors[mode] = output_tensors\r\n self.loss_terms[mode] = loss_terms\r\n self.metrics[mode] = metrics\r\n\r\n # Create summaries for scalars\r\n if mode == 'train':\r\n for name, loss_term in loss_terms.items():\r\n self.summary.scalar('loss/%s/%s' % (mode, name), loss_term)\r\n for name, metric in metrics.items():\r\n self.summary.scalar('metric/%s/%s' % (mode, name), metric)\r\n\r\n # Build the main model\r\n if len(self._train_data) > 0:\r\n _build_datasource_summaries(self._train_data, mode='train')\r\n _build_train_or_test(mode='train')\r\n logger.info('Built model.')\r\n\r\n # Print no. of parameters and lops\r\n flops = tf.profiler.profile(\r\n options=tf.profiler.ProfileOptionBuilder(\r\n tf.profiler.ProfileOptionBuilder.float_operation()\r\n ).with_empty_output().build())\r\n logger.info('------------------------------')\r\n logger.info(' Approximate Model Statistics ')\r\n logger.info('------------------------------')\r\n logger.info('FLOPS per input: {:,}'.format(flops.total_float_ops / self._batch_size))\r\n logger.info(\r\n 'Trainable Parameters: {:,}'.format(\r\n np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])\r\n )\r\n )\r\n logger.info('------------------------------')\r\n\r\n # If there are any test data streams, build same model with different scope\r\n # Trainable parameters will be copied at test time\r\n if len(self._test_data) > 0:\r\n _build_datasource_summaries(self._test_data, mode='test')\r\n with tf.variable_scope('test'):\r\n _build_train_or_test(mode='test')\r\n logger.info('Built model for live testing.')\r\n\r\n if self._enable_live_testing:\r\n self._tester._post_model_build() # Create copy ops to be run before every test run\r",
"def _regular_build(self):\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')",
"def _setup_type_shapes(self, named_ops, extra_type_shapes):\n type_shape_set = set()\n for op in six.itervalues(named_ops):\n type_shape_set.update(op.input_type_shapes)\n type_shape_set.update(op.output_type_shapes)\n if extra_type_shapes is not None:\n type_shape_set.update(extra_type_shapes)\n\n # _type_shapes: a list of all the typeshapes this loom object supports.\n self._type_shapes = sorted(type_shape_set)\n\n # Enforce uniqueness for non-empty TypeShape tags.\n non_empty_tags = set()\n for ts in self._type_shapes:\n if ts.tag:\n if ts.tag in non_empty_tags:\n raise TypeError('Tags on tagged TypeShapes must be unique; '\n '%s occured more than once.' % (ts.tag,))\n else:\n non_empty_tags.add(ts.tag)\n\n # _type_shape_to_idx: a dict mapping TypeShape objects to their indices in\n # '_type_shapes'.\n self._type_shape_to_idx = {ts: idx for idx, ts in\n enumerate(self._type_shapes)}",
"def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()",
"def _setup_network(self):\n if self._dry_run:\n self._output = [tf.constant(np.zeros((1,)+ts.shape, dtype=ts.dtype))\n for ts in self._type_shapes]\n return\n\n if self._direct_feed_dict:\n self._arg_wiring_concat = tf.placeholder(\n TENSOR_IDX_T, name='arg_wiring_concat')\n self._arg_wiring_slice_starts = tf.placeholder(\n TENSOR_IDX_T, name='arg_wiring_slice_starts')\n self._arg_wiring_slice_sizes = tf.placeholder(\n TENSOR_IDX_T, name='arg_wiring_slice_sizes')\n self._output_wirings = [\n tf.placeholder(TENSOR_IDX_T, name='output_wirings_%d' % ts_idx)\n for ts_idx in xrange(len(self._type_shapes))]\n self._constants = [\n tf.placeholder(ts.dtype, name='constants_%d' % ts_idx)\n for ts_idx, ts in enumerate(self._type_shapes)]\n else:\n # See REGISTER_WEAVER_OP in weaver_op_base.h for the definitions of the\n # outputs in the destructuring assignment below.\n (self._arg_wiring_concat,\n self._arg_wiring_slice_starts,\n self._arg_wiring_slice_sizes,\n self._output_wirings,\n self._constants) = self._weaver_op(\n metadata=self._loom_metadata_str,\n constant_types=[tf.as_dtype(ts.dtype) for ts in self._type_shapes],\n num_type_shapes=len(self._type_shapes))\n # _arg_wiring_concat: an integer vector Tensor containing all the wirings\n # for the current schedule concatenated together. They are sorted\n # lexically, by (depth, op_idx, arg_idx). This means that\n # _arg_wiring_concat consists of max_depth*self._loom_total_args, vectors\n # concatenated together. (Here max_depth refers to the final max_depth of\n # the emulated graph, not -1 in the event that the Loom was instantiated\n # with a while_loop.)\n #\n # _arg_wiring_slice_starts and _arg_wiring_slice_sizes: these are integer\n # vector Tensors of length max_depth*self._loom_total_args that specify how\n # to split _arg_wiring_concat back apart into wirings for each (depth,\n # op_idx, arg_idx).\n #\n # The rationale for concatenating all the wiring diagrams together\n # like this is that in order to support tf.while_loop, we need to create a\n # tensor which produces the appropriate wiring diagram in a way that depends\n # on the current depth (this is accomplished using tf.slice in\n # _construct_loom_layer.)\n #\n # _output_wirings: A list of integer vector Tensors, one for each TypeShape.\n # These vectors select which elements of the final state tensor end up in\n # the Loom's `output_tensor`s.\n #\n # _constants: A list of Tensors, one for each TypeShape. Each of these\n # Tensors should have the dtype of the corresponding TypeShape. The\n # contents should be the stacked set of constants declared for that\n # TypeShape.\n\n # For each TypeShape, if it's in batched input mode, we use the user\n # provided tensor as the input. 
Otherwise, we take the constants from the\n # weaver.\n inputs = self._constants\n for ts_idx, ts in enumerate(self._type_shapes):\n if ts in self._batch_inputs:\n inputs[ts_idx] = self._batch_inputs[ts]\n\n # iteration of building up the graph, state will contain tensors\n # whose rows will be the objects passed from each depth to the next one of\n # the appropriate shapes.\n state = []\n for inputs_tensor, named_tensors in (\n zip(inputs, self._ts_idx_to_named_tensors)):\n if not named_tensors:\n state.append(inputs_tensor)\n else:\n state.append(tf.concat([tf.stack(named_tensors), inputs_tensor], 0))\n\n # This block builds up the static graph that consumes Loom's wiring\n # diagrams and emulates the dynamic network.\n #\n # Note: the code that computes wiring diagrams lives in scheduler.cc for\n # efficiency reasons.\n if self._max_depth == -1: # For dynamic max_depth we use tf.while.\n current_max_depth = (\n tf.size(self._arg_wiring_slice_starts) // self._loom_total_args)\n def loop_conditional(depth, *unused_state):\n return tf.less_equal(depth, current_max_depth)\n def loop_body(depth, *state):\n new_depth = tf.add(depth, 1, name='increment_depth')\n new_state = self._construct_loom_layer(depth, state)\n return [new_depth] + new_state\n initial_depth = tf.constant(1, name='initial_depth')\n state = tf.while_loop(loop_conditional, loop_body,\n [initial_depth] + state,\n parallel_iterations=self._parallel_iterations,\n back_prop=self._back_prop,\n swap_memory=self._swap_memory)[1:]\n else: # For explicit max_depth we unroll the loop.\n for depth in xrange(1, self._max_depth+1):\n with tf.name_scope('loom_depth_%03d' % depth):\n state = self._construct_loom_layer(depth, state)\n\n # _output: The output tensors of the loom, indexed by TypeShape.\n with tf.name_scope('output_gathers'):\n self._output = [\n tf.gather(s, w, name=self._type_shapes[ts_idx].tensor_flow_name())\n for ts_idx, (s, w) in enumerate(zip(state, self._output_wirings))]\n\n # Make sure the output tensors know what shape they're supposed to be.\n for type_shape, output in zip(self._type_shapes, self._output):\n output.set_shape((None,) + type_shape.shape)",
"def build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label",
"def build(self, input_shape):\n node_embed_shape = input_shape.node_embed\n edge_embed_shape = input_shape.edge_embed\n\n with tf.name_scope('node'):\n with tf.name_scope('U'):\n self.U = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.U.build(node_embed_shape)\n\n with tf.name_scope('V'):\n self.V = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.V.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_h = {\n \"batch\": tf.keras.layers.BatchNormalization(),\n \"layer\": tf.keras.layers.LayerNormalization()\n }.get(self.normalization, None)\n if self.norm_h:\n self.norm_h.build(node_embed_shape)\n\n with tf.name_scope('edge'):\n with tf.name_scope('A'):\n self.A = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.A.build(edge_embed_shape)\n \n with tf.name_scope('B'):\n self.B = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.B.build(node_embed_shape)\n\n with tf.name_scope('C'):\n self.C = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.C.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_e = {\n 'batch': tf.keras.layers.BatchNormalization(),\n 'layer': tf.keras.layers.LayerNormalization(axis=-1)\n }.get(self.normalization, None)\n if self.norm_e:\n self.norm_e.build(edge_embed_shape)\n \n super().build(input_shape)",
"def test_op_apply_types(self) -> None:\n\n op_add_1 = OpLambda(func=lambda x: x + 1, func_reverse=lambda x: x - 1)\n op_mul_2 = OpLambda(func=lambda x: x * 2, func_reverse=lambda x: x // 2)\n op_mul_4 = OpLambda(func=lambda x: x * 4, func_reverse=lambda x: x // 4)\n\n sample_dict = NDict({})\n sample_dict[\"data.val.img_for_testing\"] = 3\n sample_dict[\"data.test.img_for_testing\"] = 3\n sample_dict[\"data.test.seg_for_testing\"] = 3\n sample_dict[\"data.test.bbox_for_testing\"] = 3\n sample_dict[\"data.test.meta\"] = 3\n\n types_dict = {\n DataTypeForTesting.IMAGE_FOR_TESTING: (op_add_1, dict()),\n DataTypeForTesting.SEG_FOR_TESTING: (op_mul_2, dict()),\n DataTypeForTesting.BBOX_FOR_TESTING: (op_mul_4, dict()),\n }\n\n op_apply_type = OpApplyTypesImaging(types_dict)\n\n sample_dict = op_apply_type(sample_dict, \"_.test_apply_type\")\n self.assertEqual(sample_dict[\"data.val.img_for_testing\"], 4)\n self.assertEqual(sample_dict[\"data.test.img_for_testing\"], 4)\n self.assertEqual(sample_dict[\"data.test.seg_for_testing\"], 6)\n self.assertEqual(sample_dict[\"data.test.bbox_for_testing\"], 12)\n self.assertEqual(sample_dict[\"data.test.meta\"], 3)\n\n sample_dict[\"model.a_seg_for_testing\"] = 3\n op_apply_type.reverse(\n sample_dict,\n key_to_follow=\"data.val.img_for_testing\",\n key_to_reverse=\"model.a_seg_for_testing\",\n op_id=\"_.test_apply_type\",\n )\n self.assertEqual(sample_dict[\"data.val.img_for_testing\"], 4)\n self.assertEqual(sample_dict[\"model.a_seg_for_testing\"], 2)",
"def build(self, input_shapes):\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)",
"def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()",
"def build(\n self, input_shape\n ):\n\n if isinstance(input_shape, tuple):\n expert_shapes, routing_input_shape = input_shape\n else:\n expert_shapes, routing_input_shape = input_shape, None\n num_experts = len(expert_shapes)\n # num_binary is the number of binary vars required to encode the\n # num_experts choices.\n self._num_binary = math.ceil(math.log2(num_experts))\n # Boolean to check if num_experts is a power of 2.\n self._power_of_2 = (num_experts == 2**self._num_binary)\n if routing_input_shape is None:\n # z_logits is a trainable 3D tensor used for selecting the experts.\n # Axis 0: Number of non-zero experts to select.\n # Axis 1: Dummy axis of length 1 used for broadcasting.\n # Axis 2: Each num_binary-dimensional row corresponds to a \"single-expert\"\n # selector.\n self._z_logits = self.add_weight(\n name=\"z_logits\",\n shape=(self._num_nonzeros, 1, self._num_binary),\n initializer=self._z_initializer,\n trainable=True)\n # w_logits is a trainable tensor used to assign weights to the\n # single-expert selectors. Each element of w_logits is a logit.\n self._w_logits = self.add_weight(\n name=\"w_logits\",\n shape=(self._num_nonzeros, 1),\n initializer=self._w_initializer,\n trainable=True)\n else:\n self._z_logits = tf.keras.layers.Dense(\n self._num_nonzeros * self._num_binary,\n kernel_initializer=self._z_initializer,\n bias_initializer=self._z_initializer)\n self._w_logits = tf.keras.layers.Dense(\n self._num_nonzeros,\n kernel_initializer=self._w_initializer,\n bias_initializer=self._w_initializer)\n # binary_matrix is a (num_experts, num_binary)-matrix used for binary\n # encoding. The i-th row contains a num_binary-digit binary encoding of the\n # integer i.\n binary_matrix = np.array([\n list(np.binary_repr(val, width=self._num_binary))\n for val in range(num_experts)\n ]).astype(bool)\n # A constant tensor = binary_matrix, with an additional dimension for\n # broadcasting.\n self._binary_codes = tf.expand_dims(\n tf.constant(binary_matrix, dtype=bool), axis=0)\n self.built = True",
"def _build(self, generation):\n with tf.variable_scope ('discriminator') as scope:\n \n image_unflatten = unflatten_layer ( self.images )\n gen_unflatten = unflatten_layer ( generation )\n\n # Conv Layer 1 - image\n conv1_out_image, params = conv_2d_layer (\n input = image_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n name = 'conv_1_img',\n visualize = True ) \n pool1_out_img = max_pool_2d_layer ( input = conv1_out_image, name = 'pool_1_img')\n lrn1_out_img = local_response_normalization_layer (pool1_out_img, name = 'lrn_1_img' ) \n \n # Conv Layer 1 - gen\n conv1_out_gen, params = conv_2d_layer (\n input = gen_unflatten,\n neurons = CONV_1_N,\n filter_size = CONV_1_FILT,\n params = params,\n name = 'conv_1_gen',\n visualize = False )\n\n pool1_out_gen = max_pool_2d_layer ( input = conv1_out_gen, name = 'pool_1_gen')\n lrn1_out_gen = local_response_normalization_layer (pool1_out_gen, name = 'lrn_1_gen' ) \n process_params(params, name = self.name)\n c1_params = params\n\n\n\n\n\n # Conv Layer 2 - image\n conv2_out_image, params = conv_2d_layer (\n input = lrn1_out_img,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n name = 'conv_2_img' )\n\n pool2_out_img = max_pool_2d_layer ( input = conv2_out_image, name = 'pool_2_img')\n lrn2_out_img = local_response_normalization_layer (pool2_out_img, name = 'lrn_2_img' ) \n\n\n # Conv Layer 2 - gen\n conv2_out_gen, params = conv_2d_layer (\n input = lrn1_out_gen,\n neurons = CONV_2_N,\n filter_size = CONV_2_FILT,\n params = params,\n name = 'conv_2_gen' )\n\n pool2_out_gen = max_pool_2d_layer ( input = conv2_out_gen, name = 'pool_2_gen')\n lrn2_out_gen = local_response_normalization_layer (pool2_out_gen, name = 'lrn_2_gen' ) \n process_params(params, name = self.name)\n c2_params = params\n\n # Dropout Layer\n flat_gen = flatten_layer(lrn2_out_gen)\n flat_img = flatten_layer(lrn2_out_img)\n\n flat_gen_dropout = dropout_layer ( input = flat_gen,\n prob = self.dropout_prob,\n name = 'dropout_1_gen') \n\n flat_img_dropout = dropout_layer ( input = flat_img,\n prob = self.dropout_prob,\n name = 'dropout_1_img') \n\n\n\n # Dot Product Layer 1 -img\n fc1_out_img, params = dot_product_layer ( input = flat_img_dropout,\n neurons = HIDDEN_1,\n name = 'image_disc_dot_1')\n # Dot Product Layer 1 - gen\n fc1_out_gen, params = dot_product_layer ( input = flat_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_1')\n\n process_params(params, name = self.name)\n d1_params = params\n \n ##\n fc1_out_gen_dropout = dropout_layer ( input = fc1_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_2_gen') \n fc1_out_img_dropout = dropout_layer ( input = fc1_out_img,\n prob = self.dropout_prob,\n name = 'dropout_2_img')\n\n # Dot Product Layer 2 -img\n fc2_out_img, params = dot_product_layer ( input = fc1_out_img_dropout,\n neurons = HIDDEN_2,\n name = 'image_disc_dot_2')\n # Dot Product Layer 2 - gen\n fc2_out_gen, params = dot_product_layer ( input = fc1_out_gen_dropout,\n params = params,\n neurons = HIDDEN_2,\n name = 'gen_disc_dot_2')\n process_params(params, name = self.name)\n d2_params = params\n\n ##\n fc2_out_gen_dropout = dropout_layer ( input = fc2_out_gen,\n prob = self.dropout_prob,\n name = 'dropout_3_gen') \n fc2_out_img_dropout = dropout_layer ( input = fc2_out_img,\n prob = self.dropout_prob,\n name = 'dropout_3_img')\n\n # Dot Product Layer 1 -img\n self.real, params = dot_product_layer ( input = fc2_out_img_dropout,\n neurons = 1,\n activation = 'sigmoid',\n name = 'real')\n # Dot Product Layer 1 -gen\n 
self.fake, params = dot_product_layer ( input = fc2_out_gen_dropout,\n params = params,\n neurons = 1,\n activation = 'sigmoid',\n name = 'fake')\n\n process_params(params, name = self.name)\n d3_params = params\n self.params = [c1_params, c2_params, d1_params, d2_params, d3_params] \n\n\n with tf.variable_scope (self.name + '_objectives') as scope: \n with tf.variable_scope( self.name + 'discriminator_obj') as scope: \n # discriminator_obj = - 0.5 * tf.reduce_mean(log(self.real)) - \\\n # 0.5 * tf.reduce_mean(log(1-self.fake))\n discriminator_obj = 0.5 * tf.reduce_mean ((self.real-1)**2) + \\\n 0.5 * tf.reduce_mean ((self.fake)**2)\n tf.summary.scalar('discriminator_obj', discriminator_obj)\n tf.add_to_collection( self.name + '_objectives', discriminator_obj ) \n\n with tf.variable_scope (self.name + '_probabilites') as scope: \n tf.summary.scalar('fake_probability', tf.reduce_mean(self.fake))\n tf.summary.scalar('real_probability', tf.reduce_mean(self.real))\n \n self._cook_optimizer( \n lr = DIS_GAN_LR, \n optimizer = DIS_GAN_OPTIMIZER,\n l1_coeff = DIS_GAN_L1_COEFF,\n l2_coeff = DIS_GAN_WEIGHT_DECAY_COEFF)",
"def _set_shapes(self, batch_size, features_in, labels_in):\n features_in['mcts_features'] = tf.reshape(\n features_in['mcts_features'], [batch_size, self._env_state_space],\n name='mcts_feature_reshape')\n\n features_in['policy_features'] = tf.reshape(\n features_in['policy_features'], [batch_size, self._env_state_space],\n name='policy_feature_reshape')\n\n labels_in['action_tensor'] = tf.reshape(\n labels_in['action_tensor'], [batch_size, self._env_action_space],\n name='action_reshape')\n\n labels_in['mean_tensor'] = tf.reshape(\n labels_in['mean_tensor'], [batch_size, self._env_action_space],\n name='mean_reshape')\n\n labels_in['logstd_tensor'] = tf.reshape(\n labels_in['logstd_tensor'], [batch_size, self._env_action_space],\n name='logstd_reshape')\n\n labels_in['value_tensor'] = tf.reshape(\n labels_in['value_tensor'], [batch_size], name='value_reshape')\n\n labels_in['return_tensor'] = tf.reshape(\n labels_in['return_tensor'], [batch_size], name='return_reshape')\n\n labels_in['old_neg_logprob_tensor'] = tf.reshape(\n labels_in['old_neg_logprob_tensor'], [batch_size], name='log_reshape')\n\n labels_in['mcts_enable_tensor'] = tf.reshape(\n labels_in['mcts_enable_tensor'], [batch_size], name='mcts_reshape')\n\n labels_in['policy_action_tensor'] = tf.reshape(\n labels_in['policy_action_tensor'], [batch_size, self._env_action_space],\n name='policy_action_reshape')\n\n labels_in['policy_value_tensor'] = tf.reshape(\n labels_in['policy_value_tensor'], [batch_size],\n name='policy_value_reshape')\n\n labels_in['policy_return_tensor'] = tf.reshape(\n labels_in['policy_return_tensor'], [batch_size],\n name='policy_return_reshape')\n\n labels_in['policy_old_neg_logprob_tensor'] = tf.reshape(\n labels_in['policy_old_neg_logprob_tensor'], [batch_size],\n name='log_reshape')\n\n return features_in, labels_in",
"def build_model():\n with tf.name_scope('placeholders'):\n real_data_int = tf.placeholder(tf.int32, shape=[None, picture_size])\n x_true = 2 * ((tf.cast(real_data_int, tf.float32) / 255.) - .5)\n z = tf.placeholder(tf.float32, [None, input_dim])\n if use_JL:\n JL = tf.placeholder(tf.float32, [d_last_layer_size, JL_dim])\n P_non_normalized = tf.placeholder(tf.float32, [JL_dim, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n else:\n JL = None\n P_non_normalized = tf.placeholder(tf.float32, [d_last_layer_size, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n\n x_generated = generator(z, n_features_first=n_features_first_g,\n n_features_reduction_factor=n_features_reduction_factor, min_features=64,\n BN=BN, power=power, extra_layer=extra_layer_g,\n init_method=init_method, n_features_image=n_features_image)\n\n d_pred_true, d_last_true = discriminator(x_true, reuse=False, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n d_pred_gen, d_last_gen = discriminator(x_generated, reuse=True, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n\n # define generator loss (big part taken from SWG)\n with tf.name_scope('g_loss'):\n # apply the Johnson-Lindenstrauss map, if wanted, to the flattened array\n if use_JL:\n JL_true = tf.matmul(d_last_true, JL)\n JL_gen = tf.matmul(d_last_gen, JL)\n else:\n JL_true = d_last_true\n JL_gen = d_last_gen\n\n # next project the samples (images). After being transposed, we have tensors\n # of the format: [[projected_image1_proj1, projected_image2_proj1, ...],\n # [projected_image1_proj2, projected_image2_proj2, ...],...]\n # Each row has the projections along one direction. This makes it easier for the sorting that follows.\n # first normalize the random normal vectors to lie in the sphere\n P = tf.nn.l2_normalize(P_non_normalized, axis=0)\n\n projected_true = tf.transpose(tf.matmul(JL_true, P))\n projected_fake = tf.transpose(tf.matmul(JL_gen, P))\n\n sorted_true, true_indices = tf.nn.top_k(input=projected_true, k=batch_size)\n sorted_fake, fake_indices = tf.nn.top_k(input=projected_fake, k=batch_size)\n\n # For faster gradient computation, we do not use sorted_fake to compute\n # loss. 
Instead we re-order the sorted_true so that the samples from the\n # true distribution go to the correct sample from the fake distribution.\n\n # It is less expensive (memory-wise) to rearrange arrays in TF.\n # Flatten the sorted_true from dim [n_projections, batch_size].\n flat_true = tf.reshape(sorted_true, [-1])\n\n # Modify the indices to reflect this transition to an array.\n # new index = row + index\n rows = np.asarray([batch_size * np.floor(i * 1.0 / batch_size) for i in range(n_projections * batch_size)])\n rows = rows.astype(np.int32)\n flat_idx = tf.reshape(fake_indices, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n # The scatter operation takes care of reshaping to the rearranged matrix\n shape = tf.constant([batch_size * n_projections])\n rearranged_true = tf.reshape(tf.scatter_nd(flat_idx, flat_true, shape), [n_projections, batch_size])\n\n generator_loss = tf.reduce_mean(tf.square(projected_fake - rearranged_true))\n\n # get the sliced Wasserstein distance (SWD) (since SWD and JLSWD are not comparable)\n with tf.name_scope('SWD'):\n P_SWD = tf.nn.l2_normalize(P_non_normalized_SWD, axis=0)\n\n projected_true_SWD = tf.transpose(tf.matmul(x_true, P_SWD))\n projected_fake_SWD = tf.transpose(tf.matmul(x_generated, P_SWD))\n\n sorted_true_SWD, true_indices_SWD = tf.nn.top_k(input=projected_true_SWD, k=batch_size)\n sorted_fake_SWD, fake_indices_SWD = tf.nn.top_k(input=projected_fake_SWD, k=batch_size)\n\n flat_true_SWD = tf.reshape(sorted_true_SWD, [-1])\n flat_idx_SWD = tf.reshape(fake_indices_SWD, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n rearranged_true_SWD = tf.reshape(tf.scatter_nd(flat_idx_SWD, flat_true_SWD, shape),\n [n_projections, batch_size])\n\n SWD = tf.reduce_mean(tf.square(projected_fake_SWD - rearranged_true_SWD))\n\n # define the discriminator loss\n with tf.name_scope('d_loss'):\n d_true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_pred_true), logits=d_pred_true)\n d_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_pred_gen), logits=d_pred_gen)\n discriminator_loss = tf.reduce_mean(d_true_loss + d_fake_loss)\n\n with tf.name_scope('g_optimizer'):\n generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n g_train = g_optimizer.minimize(generator_loss, var_list=generator_vars)\n\n with tf.name_scope('d_optimizer'):\n discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n d_train = d_optimizer.minimize(discriminator_loss, var_list=discriminator_vars)\n\n return real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train, d_train",
"def build_auditory_model(self, dtype=tf.float32):\n # Build placeholders for two waveforms and compute waveform loss\n self.tensor_wave0 = tf.placeholder(dtype, [None, 40000])\n self.tensor_wave1 = tf.placeholder(dtype, [None, 40000])\n print('Building waveform loss')\n self.loss_waveform = self.l1_distance(self.tensor_wave0, self.tensor_wave1)\n # Build cochlear model for each waveform and compute cochlear model loss\n print('Building cochlear model loss')\n tensor_coch0, _ = util_cochlear_model.build_cochlear_model(\n self.tensor_wave0,\n **self.config_cochlear_model)\n tensor_coch1, _ = util_cochlear_model.build_cochlear_model(\n self.tensor_wave1,\n **self.config_cochlear_model)\n self.loss_cochlear_model = self.l1_distance(tensor_coch0, tensor_coch1)\n # Build network(s) for each waveform and compute deep feature losses\n self.loss_deep_features_dict = {}\n self.loss_deep_features = tf.zeros([], dtype=dtype)\n for network_key in sorted(self.config_recognition_networks.keys()):\n print('Building deep feature loss (recognition network: {})'.format(network_key))\n with open(self.config_recognition_networks[network_key]['fn_arch'], 'r') as f:\n list_layer_dict = json.load(f)\n # Build network for stimulus 0\n with tf.variable_scope(network_key + '0') as scope:\n _, tensors_network0 = util_recognition_network.build_network(\n tensor_coch0,\n list_layer_dict,\n n_classes_dict=self.config_recognition_networks[network_key]['n_classes_dict'])\n var_list = {\n v.name.replace(scope.name + '/', '').replace(':0', ''): v\n for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope.name)\n }\n self.config_recognition_networks[network_key]['saver0'] = tf.train.Saver(\n var_list=var_list,\n max_to_keep=0)\n # Build network for stimulus 1\n with tf.variable_scope(network_key + '1') as scope:\n _, tensors_network1 = util_recognition_network.build_network(\n tensor_coch1,\n list_layer_dict,\n n_classes_dict=self.config_recognition_networks[network_key]['n_classes_dict'])\n var_list = {\n v.name.replace(scope.name + '/', '').replace(':0', ''): v\n for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope.name)\n }\n self.config_recognition_networks[network_key]['saver1'] = tf.train.Saver(\n var_list=var_list,\n max_to_keep=0)\n # Compute deep feature losses (weighted sum across layers)\n self.loss_deep_features_dict[network_key] = tf.zeros([], dtype=dtype)\n layer_weights = self.config_recognition_networks[network_key]['weights']\n for layer_key in sorted(layer_weights.keys()):\n tmp = self.l1_distance(tensors_network0[layer_key], tensors_network1[layer_key])\n self.loss_deep_features_dict[network_key] += layer_weights[layer_key] * tmp\n self.loss_deep_features += self.loss_deep_features_dict[network_key]",
"def get_data_types():\n return tf.float32, tf.float32, tf.int32",
"def compute_children_node_types_tensor(self, parent_node_embedding, children_index, node_type_dim):\n \n max_children = tf.shape(input=children_index)[2]\n batch_size = tf.shape(input=parent_node_embedding)[0]\n num_nodes = tf.shape(input=parent_node_embedding)[1]\n\n # replace the root node with the zero vector so lookups for the 0th\n # vector return 0 instead of the root vector\n # zero_vecs is (batch_size, num_nodes, 1)\n zero_vecs = tf.zeros((batch_size, 1, node_type_dim))\n # vector_lookup is (batch_size x num_nodes x node_dim)\n vector_lookup = tf.concat([zero_vecs, parent_node_embedding[:, 1:, :]], axis=1)\n # children is (batch_size x num_nodes x num_children x 1)\n children_index = tf.expand_dims(children_index, axis=3)\n # prepend the batch indices to the 4th dimension of children\n # batch_indices is (batch_size x 1 x 1 x 1)\n batch_index = tf.reshape(tf.range(0, batch_size), (batch_size, 1, 1, 1))\n # batch_indices is (batch_size x num_nodes x num_children x 1)\n batch_index = tf.tile(batch_index, [1, num_nodes, max_children, 1])\n # children is (batch_size x num_nodes x num_children x 2)\n children_index = tf.concat([batch_index, children_index], axis=3)\n # output will have shape (batch_size x num_nodes x num_children x node_type_dim)\n # NOTE: tf < 1.1 contains a bug that makes backprop not work for this!\n return tf.gather_nd(vector_lookup, children_index)",
"def list_to_backend_type(data: List) -> TTensor:",
"def _build(self):\n if self.attn:\n self.Attn = AttentionNet(self.dim_b1, channels=self.channels, name='Attn')\n self.predsb1 = self.Attn(self.xb1, is_training=self.is_training)\n self.predsb2 = self.Attn(self.xb2, is_training=self.is_training, reuse=True)\n #TODO: generators want to make their synthetics look like b1/b2 to attn model\n\n self.loss_attn = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb1, labels=tf.zeros_like(self.predsb1)))\n self.loss_attn += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb2, labels=tf.ones_like(self.predsb2)))\n\n self.attnb1 = tf.gradients(self.loss_attn, self.xb1)[0]\n self.attnb2 = tf.gradients(self.loss_attn, self.xb2)[0]\n\n self.attnb1 = tf.abs(self.attnb1)\n self.attnb1 = self.attnb1 / tf.reduce_sum(self.attnb1, axis=1, keep_dims=True)\n self.attnb1 = self.attnb1 / tf.reduce_max(self.attnb1, axis=1, keep_dims=True)\n\n self.attnb2 = tf.abs(self.attnb2)\n self.attnb2 = self.attnb2 / tf.reduce_sum(self.attnb2, axis=1, keep_dims=True)\n self.attnb2 = self.attnb2 / tf.reduce_max(self.attnb2, axis=1, keep_dims=True)\n\n self.attnb1 = nameop(self.attnb1, 'attnb1')\n self.attnb2 = nameop(self.attnb2, 'attnb2')\n\n self.G12 = GeneratorResnet(self.dim_b1, self.dim_b2, channels=self.channels, name='G12')\n self.Gb2 = self.G12(self.xb1, is_training=self.is_training)\n self.Gb2 = nameop(self.Gb2, 'Gb2')\n\n self.G21 = GeneratorResnet(self.dim_b2, self.dim_b1, channels=self.channels, name='G21')\n self.Gb1 = self.G21(self.xb2, is_training=self.is_training)\n self.Gb1 = nameop(self.Gb1, 'Gb1')\n\n\n self.Gb2_reconstructed = self.G12(self.Gb1, is_training=self.is_training, reuse=True)\n self.Gb1_reconstructed = self.G21(self.Gb2, is_training=self.is_training, reuse=True)\n\n self.Gb1_reconstructed = nameop(self.Gb1_reconstructed, 'xb1_reconstructed')\n self.Gb2_reconstructed = nameop(self.Gb2_reconstructed, 'xb2_reconstructed')\n\n self.D1 = Discriminator(self.dim_b1, 1, channels=self.channels, name='D1')\n self.D2 = Discriminator(self.dim_b2, 1, channels=self.channels, name='D2')\n\n self.D1_probs_z = self.D1(self.xb1, is_training=self.is_training)\n self.D1_probs_G = self.D1(self.Gb1, is_training=self.is_training, reuse=True)\n self.D1_probs_z = nameop(self.D1_probs_z, 'D1_probs_z')\n self.D1_probs_G = nameop(self.D1_probs_G, 'D1_probs_G')\n\n self.D2_probs_z = self.D2(self.xb2, is_training=self.is_training)\n self.D2_probs_G = self.D2(self.Gb2, is_training=self.is_training, reuse=True)\n self.D2_probs_z = nameop(self.D2_probs_z, 'D2_probs_z')\n self.D2_probs_G = nameop(self.D2_probs_G, 'D2_probs_G')\n\n self._build_loss()\n\n self._build_optimization()",
"def build(self,unused):\n # (word_embeddings_shape, _) = input_shapes\n # width = word_embeddings_shape.as_list()[-1]\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"token_type_embeddings\",\n shape=[self.token_type_vocab_size, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super().build(unused)",
"def build(self, input_tensors, is_training, lengths=None, hparams=None):\n input_tensor = input_tensors[-1]\n net = input_tensor\n while True:\n plate_dimension = net.get_shape()[2]\n if plate_dimension < self._kernel_size:\n break\n\n net = tf_slim.conv2d(\n net,\n min(get_channel_dim(net) * 2, self._max_channels),\n kernel_size=self._kernel_size,\n stride=self._strides,\n padding='same')\n net = tf.nn.leaky_relu(net)\n\n net = tf.keras.layers.Flatten(name='flatten')(net)\n return input_tensors + [net]",
"def from_dict(self, handler_dict):\n nb_clusters = handler_dict['nb_clusters']\n while len(self.clusters) < nb_clusters:\n self.clusters.append(C.Cluster())\n\n # Weights\n weight_dict = handler_dict['weights']\n if weight_dict is not None:\n # Network batch\n net = weight_dict['net']\n if net is not None:\n repr_dict = net['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = net['perturb']\n perturbs = []\n if pert_list is not None:\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n\n for param in list(self.net.named_parameters()):\n tensor_name = param[0]\n self.tensor_info[tensor_name] = (param[1], perturbs, repr)\n\n # Modules batch\n modules = weight_dict['modules']\n if modules is not None:\n for module in modules:\n module_name = module['name']\n\n repr_dict = module['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = module['perturb']\n perturbs = []\n if pert_list is not None:\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n else:\n perturbs=None\n \n current_mod = dict(self.net.named_modules())[module_name]\n for param_key in dict(current_mod.named_parameters()):\n full_key = module_name + '.' + param_key\n tens = dict(current_mod.named_parameters())[param_key]\n self.tensor_info[full_key] = (tens, perturbs, repr)\n\n # Tensors\n tensors = weight_dict['tensors']\n if tensors is not None:\n for tensor in tensors:\n tensor_name = tensor['name']\n\n repr_dict = tensor['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = tensor['perturb']\n perturbs = []\n if pert_list is not None:\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n else:\n perturbs=None\n\n tens = dict(self.net.named_parameters())[tensor_name]\n self.tensor_info[tensor_name] = (tens, perturbs, repr)\n\n # Activations\n acti_dict = handler_dict['activations']\n if acti_dict is not None:\n # Network batch\n net = acti_dict['net']\n if net is not None:\n repr_dict = net['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = net['perturb']\n perturbs = []\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n\n for name, module in self.net.named_modules():\n hook = Hook(perturbs, repr)\n self.hooks[name] = module.register_forward_hook(\n hook.hook_fn)\n self.acti_info[name] = (perturbs, repr)\n\n # Modules batch\n modules = acti_dict['modules']\n if modules is not None:\n for module in modules:\n module_name = module['name']\n\n repr_dict = module['repr']\n repr = R.construct_repr(repr_dict)\n\n pert_list = module['perturb']\n perturbs = []\n for pert_dict in pert_list:\n pert = P.construct_pert(pert_dict)\n perturbs.append(pert)\n\n current_mod = dict(self.net.named_modules())[module_name]\n hook = Hook(perturbs, repr)\n self.hooks[module_name] = current_mod.register_forward_hook(\n hook.hook_fn)\n self.acti_info[module_name] = (perturbs, repr)\n\n # Cluster assignement\n self.assign_clusters()",
"def _build(self, shape):\n if self.multi_label:\n if shape.ndims != 2:\n raise ValueError('`y_true` must have rank=2 when `multi_label` is '\n 'True. Found rank %s.' % shape.ndims)\n self._num_labels = shape[1]\n variable_shape = tensor_shape.TensorShape(\n [tensor_shape.Dimension(self.num_thresholds), self._num_labels])\n\n else:\n variable_shape = tensor_shape.TensorShape(\n [tensor_shape.Dimension(self.num_thresholds)])\n self._build_input_shape = shape\n # Create metric variables\n self.true_positives = self.add_weight(\n 'true_positives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n self.true_negatives = self.add_weight(\n 'true_negatives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n self.false_positives = self.add_weight(\n 'false_positives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n self.false_negatives = self.add_weight(\n 'false_negatives',\n shape=variable_shape,\n initializer=init_ops.zeros_initializer)\n\n if self.multi_label:\n with ops.init_scope():\n # This should only be necessary for handling v1 behavior. In v2, AUC\n # should be initialized outside of any tf.functions, and therefore in\n # eager mode.\n if not context.executing_eagerly():\n K._initialize_variables(K._get_session()) # pylint: disable=protected-access\n\n self._built = True",
"def _build_summaries(self):\n max_outputs = 3\n summaries = []\n\n # images\n # ------------------------------------------------\n summary_input_shape = image_utils.get_image_summary_shape(self._input_shape_visualisation)\n\n # input images\n input_summary_reshape = tf.reshape(self._input_values, summary_input_shape, name='input_summary_reshape')\n input_summary_op = tf.summary.image('input_images', input_summary_reshape, max_outputs=max_outputs)\n summaries.append(input_summary_op)\n\n # degraded, target and completed images, and histograms where relevant\n target = self._dual.get_op('target')\n degraded = self._dual.get_op('degraded')\n decoding_op = self.get_decoding_op()\n\n output_hist = tf.summary.histogram(\"output\", decoding_op)\n summaries.append(output_hist)\n\n input_hist = tf.summary.histogram(\"input\", self._input_values)\n summaries.append(input_hist)\n\n # network output when presented with blank\n blank_output_first = self._dual.get_op('blank_output_first')\n blank_first = tf.summary.image('blank_first', tf.reshape(blank_output_first, summary_input_shape))\n summaries.append(blank_first)\n\n blank_output_last = self._dual.get_op('blank_output_last')\n blank_last = tf.summary.image('blank_last', tf.reshape(blank_output_last, summary_input_shape))\n summaries.append(blank_last)\n \n with tf.name_scope('optimize'):\n completed_summary_reshape = tf.reshape(decoding_op, summary_input_shape, 'completed_summary_reshape')\n summaries.append(tf.summary.image('b_completed', completed_summary_reshape))\n\n if self._hparams.bt_degrade:\n degraded_summary_reshape = tf.reshape(degraded, summary_input_shape, 'degraded_summary_reshape')\n summaries.append(tf.summary.image('a_degraded', degraded_summary_reshape))\n\n target_summary_reshape = tf.reshape(target, summary_input_shape, 'target_summary_reshape')\n summaries.append(tf.summary.image('c_target', target_summary_reshape))\n\n # display slow weights as images and distributions\n with tf.name_scope('slow-weights'):\n w = self._dual.get_op('w')\n add_square_as_square(summaries, w, 'w')\n\n w_hist = tf.summary.histogram(\"w\", w)\n summaries.append(w_hist)\n\n alpha = self._dual.get_op('alpha')\n add_square_as_square(summaries, alpha, 'alpha')\n\n alpha_hist = tf.summary.histogram(\"alpha\", alpha)\n summaries.append(alpha_hist)\n\n if self._hparams.bias:\n bias = self._dual.get_op('bias')\n bias_image_shape, _ = image_utils.square_image_shape_from_1d(self._hparams.filters)\n bias_image = tf.reshape(bias, bias_image_shape, name='bias_summary_reshape')\n summaries.append(tf.summary.image('bias', bias_image))\n\n bias_hist = tf.summary.histogram(\"bias\", bias)\n summaries.append(bias_hist)\n\n # eta\n eta_op = self._dual.get_op('eta')\n eta_scalar = tf.reduce_sum(eta_op)\n eta_summary = tf.summary.scalar('eta', eta_scalar)\n summaries.append(eta_summary)\n\n # x_shift\n x_shift_op = self._dual.get_op('x_shift')\n xs_scalar = tf.reduce_sum(x_shift_op)\n xs_summary = tf.summary.scalar('x_shift', xs_scalar)\n summaries.append(xs_summary)\n\n # display fast weights (eta and hebbian), as image, scalars and histogram\n with tf.name_scope('fast-weights'):\n\n # as images\n hebb = self._dual.get_op('hebb')\n add_square_as_square(summaries, hebb, 'hebb')\n\n # as scalars\n hebb_summary = tf_build_stats_summaries_short(hebb, 'hebb')\n summaries.append(hebb_summary)\n\n # as histograms\n hebb_hist = tf.summary.histogram(\"hebb\", hebb)\n summaries.append(hebb_hist)\n\n hebb_per_neuron = tf.reduce_sum(tf.abs(hebb), 0)\n hebb_per_neuron = 
tf.summary.histogram('hebb_pn', hebb_per_neuron)\n summaries.append(hebb_per_neuron)\n\n # outer products\n outer_first = self._dual.get_op('outer_first')\n outer_last = self._dual.get_op('outer_last')\n add_square_as_square(summaries, outer_first, 'outer_first')\n add_square_as_square(summaries, outer_last, 'outer_last')\n\n # optimization related quantities\n with tf.name_scope('optimize'):\n # loss\n loss_op = self.get_loss_op()\n loss_summary = tf.summary.scalar('loss', loss_op)\n summaries.append(loss_summary)\n\n # losses as an image\n losses = self._dual.get_op(\"losses\")\n shape = losses.get_shape().as_list()\n volume = np.prod(shape[1:])\n losses_image_shape, _ = image_utils.square_image_shape_from_1d(volume)\n losses_image = tf.reshape(losses, losses_image_shape)\n summaries.append(tf.summary.image('losses', losses_image))\n\n input_stats_summary = tf_build_stats_summaries_short(self._input_values, 'input-stats')\n summaries.append(input_stats_summary)\n\n return summaries",
"def build(self, input_tensors, is_training, lengths=None, hparams=None):\n input_tensor = input_tensors[-1]\n net = tf_slim.conv2d(\n input_tensor,\n get_channel_dim(input_tensor),\n kernel_size=self._kernel_size,\n padding='same')\n\n # Batch norm\n if self._apply_batch_norm:\n net = tf_slim.batch_norm(net, is_training=is_training)\n net = tf.nn.leaky_relu(net)\n\n net = tf_slim.conv2d(\n input_tensor,\n get_channel_dim(input_tensor),\n kernel_size=self._kernel_size,\n padding='same')\n\n net += input_tensor\n # Batch norm\n if self._apply_batch_norm:\n net = tf_slim.batch_norm(net, is_training=is_training)\n\n net = tf.nn.leaky_relu(net)\n return input_tensors + [net]",
"def build(self, input_tensors, is_training, lengths=None, hparams=None):\n input_tensor = input_tensors[-1]\n net1 = tf_slim.conv2d(\n input_tensor,\n get_channel_dim(input_tensor),\n kernel_size=self._kernel_size,\n padding='same')\n\n net = tf.nn.leaky_relu(net1)\n\n net1 = tf_slim.conv2d(\n net,\n get_channel_dim(input_tensor),\n kernel_size=self._kernel_size,\n padding='same')\n\n net2 = tf_slim.conv2d(\n input_tensor,\n get_channel_dim(input_tensor),\n kernel_size=self._kernel_size,\n padding='same')\n\n net = tf.nn.leaky_relu(net2)\n\n net2 = tf_slim.conv2d(\n net,\n get_channel_dim(input_tensor),\n kernel_size=self._kernel_size,\n padding='same')\n\n net1 /= 2\n net2 /= 2\n input_tensor += net1\n input_tensor += net2\n\n net = tf.nn.leaky_relu(input_tensor)\n return input_tensors + [net]"
]
| [
"0.5455603",
"0.5400152",
"0.5234202",
"0.51972973",
"0.5190988",
"0.51100725",
"0.50209606",
"0.50017303",
"0.4946088",
"0.4935415",
"0.4932959",
"0.49186134",
"0.4878031",
"0.48477086",
"0.4841599",
"0.48182094",
"0.48147652",
"0.48146516",
"0.47980627",
"0.47892433",
"0.4765979",
"0.47637603",
"0.47400507",
"0.47375217",
"0.47280228",
"0.47075686",
"0.46801504",
"0.46773165",
"0.46702352",
"0.4668417"
]
| 0.7305379 | 0 |
Enumerates nested types along a column_path. A nested type is either a listlike type or a struct type. It uses `column_path`[0] to first address a field in the schema, and enumerates its type. If that type is nested, it enumerates its child and continues recursively until the column_path reaches an end. The child of a listlike type is its value type. The child of a struct type is the type of the child field of the name given by the corresponding step in the column_path. | def _EnumerateTypesAlongPath(arrow_schema: pa.Schema,
column_path: path.ColumnPath,
stop_at_path_end: bool = False) -> pa.DataType:
field_name = column_path.initial_step()
column_path = column_path.suffix(1)
arrow_field = arrow_schema.field(field_name)
arrow_type = arrow_field.type
yield arrow_type
while True:
if stop_at_path_end and not column_path:
break
if pa.types.is_struct(arrow_type):
# get the field from the StructType
if not column_path:
break
curr_field_name = column_path.initial_step()
column_path = column_path.suffix(1)
try:
arrow_field = arrow_type[curr_field_name]
except KeyError as e:
raise ValueError(
"Field '{}' could not be found in the current Struct: '{}'".format(
curr_field_name, arrow_type)) from e
arrow_type = arrow_field.type
elif _IsListLike(arrow_type):
arrow_type = arrow_type.value_type
else:
yield arrow_type
if column_path:
raise ValueError(
"The arrow_schema fields are exhausted, but there are remaining "
"fields in the column_path: '{}'".format(column_path))
break
yield arrow_type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _GetNestDepthAndValueType(\n arrow_schema: pa.Schema,\n column_path: path.ColumnPath) -> Tuple[int, pa.DataType]:\n arrow_type = arrow_schema.field(column_path.steps()[0]).type\n depth = 0\n\n for arrow_type in _EnumerateTypesAlongPath(arrow_schema, column_path):\n if _IsListLike(arrow_type):\n depth += 1\n\n return depth, arrow_type",
"def walk_path(model, path):\n current_model = model\n path_length = path.count(\".\")\n path_part = None\n outermost_path_part = None\n innermost_table_name = None\n\n for i, path_elem in enumerate(path.split(\".\")):\n column = getattr(current_model, path_elem)\n\n # This is not a column managed by sqlalchemy, ignore it\n if not isinstance(column, InstrumentedAttribute):\n # Let's assume some other path also covers this table\n return None, None\n\n prop = column.property\n\n if isinstance(prop, RelationshipProperty):\n mapper = class_mapper(current_model)\n pk = mapper.primary_key[0].name\n tablename = mapper.mapped_table.name\n innermost_table_name = prop.table.name\n\n if prop.direction == ONETOMANY:\n new_path_part = OneToManyPathPart(tablename, pk)\n if i == path_length:\n remote_side = list(prop.remote_side)[0]\n remote_column = remote_side.name\n inner = ColumnPathPart(\"\", remote_column)\n new_path_part.inner = inner\n innermost_table_name = remote_side.table.name\n elif prop.direction == MANYTOONE:\n new_path_part = ManyToOnePathPart(tablename, pk,\n column.key)\n\n current_model = prop.mapper.class_\n\n elif (isinstance(prop, ColumnProperty) or\n isinstance(prop, CompositeProperty)):\n # We're not interested in columns (or a collection or them) because\n # the relationship handling takes care of selections on primary keys\n # etc.\n return None, None\n\n if path_part is None:\n path_part = new_path_part\n outermost_path_part = path_part\n else:\n path_part.inner = new_path_part\n path_part = new_path_part\n\n if path_part.inner is None:\n # The path ended in a relationship property\n path_part.inner = ColumnPathPart(\"\", \"id\")\n\n return outermost_path_part, innermost_table_name",
"def explore_type(name, datatype, is_child):\n type_code = datatype.code\n type_desc = \"\"\n if type_code == gdb.TYPE_CODE_STRUCT:\n type_desc = \"struct/class\"\n else:\n type_desc = \"union\"\n\n fields = datatype.fields()\n if CompoundExplorer._get_real_field_count(fields) == 0:\n if is_child:\n print (\"%s is a %s of type '%s' with no fields.\" %\n (name, type_desc, str(datatype)))\n Explorer.return_to_enclosing_type_prompt()\n else:\n print (\"'%s' is a %s with no fields.\" % (name, type_desc))\n return False\n\n if is_child:\n print (\"%s is a %s of type '%s' \"\n \"with the following fields:\\n\" %\n (name, type_desc, str(datatype)))\n else:\n print (\"'%s' is a %s with the following \"\n \"fields:\\n\" %\n (name, type_desc))\n\n has_explorable_fields = False\n current_choice = 0\n choice_to_compound_field_map = { }\n print_list = [ ]\n for field in fields:\n if field.artificial:\n continue\n if field.is_base_class:\n field_desc = \"base class\"\n else:\n field_desc = \"field\"\n rhs = (\"<Enter %d to explore this %s of type '%s'>\" %\n (current_choice, field_desc, str(field.type)))\n print_list.append((field.name, rhs))\n choice_to_compound_field_map[str(current_choice)] = (\n field.name, field.type, field_desc)\n current_choice = current_choice + 1\n\n CompoundExplorer._print_fields(print_list)\n print (\"\")\n\n if len(choice_to_compound_field_map) > 0:\n choice = raw_input(\"Enter the field number of choice: \")\n if choice in choice_to_compound_field_map:\n if is_child:\n new_name = (\"%s '%s' of %s\" % \n (choice_to_compound_field_map[choice][2],\n choice_to_compound_field_map[choice][0],\n name))\n else:\n new_name = (\"%s '%s' of '%s'\" % \n (choice_to_compound_field_map[choice][2],\n choice_to_compound_field_map[choice][0],\n name))\n Explorer.explore_type(new_name,\n choice_to_compound_field_map[choice][1], True)\n return True\n else:\n if is_child:\n Explorer.return_to_enclosing_type()\n else:\n if is_child:\n Explorer.return_to_enclosing_type_prompt()\n\n return False",
"def explore_type(name, datatype, is_child):\n if datatype.code == gdb.TYPE_CODE_ENUM:\n if is_child:\n print (\"%s is of an enumerated type '%s'.\" %\n (name, str(datatype)))\n else:\n print (\"'%s' is an enumerated type.\" % name)\n else:\n if is_child:\n print (\"%s is of a scalar type '%s'.\" %\n (name, str(datatype)))\n else:\n print (\"'%s' is a scalar type.\" % name)\n\n if is_child:\n Explorer.return_to_enclosing_type_prompt()\n Explorer.return_to_enclosing_type()\n\n return False",
"def explore_type(name, datatype, is_child):\n target_type = datatype.target()\n Explorer.explore_type(name, target_type, is_child)\n return False",
"def iter_child_nodes(node):\r\n for name, field in iter_fields(node):\r\n if isinstance(field, AST):\r\n yield field\r\n elif isinstance(field, list):\r\n for item in field:\r\n if isinstance(item, AST):\r\n yield item",
"def generic_visit(self, node):\r\n for field, value in iter_fields(node):\r\n if isinstance(value, list):\r\n for item in value:\r\n if isinstance(item, AST):\r\n self.visit(item)\r\n elif isinstance(value, AST):\r\n self.visit(value)",
"def generic_visit(self, node):\n for field, value in ast.iter_fields(node):\n if isinstance(value, list):\n for item in reversed(value):\n if isinstance(item, ast.AST):\n self.visit(item)\n elif isinstance(value, ast.AST):\n self.visit(value)",
"def nested_lookup(doc, field):\n value = doc\n keys = field.split(\".\")\n try:\n for k in keys:\n if isinstance(value, (list, tuple)):\n # assuming we have a list of dict with k as one of the keys\n stype = set([type(e) for e in value])\n if not stype:\n return None\n assert len(stype) == 1 and stype == {dict}, \"Expecting a list of dict, found types: %s\" % stype\n value = [e[k] for e in value if e.get(k)]\n # can't go further ?\n return value\n else:\n value = value[k]\n except KeyError:\n return None\n\n return value",
"def IterToValueType(self, value_type: type) -> Generator[tuple, None, None]:\n if self._value_type() == value_type:\n for k, v in self.items():\n yield k, v\n else:\n for k, v in self.items():\n for nested_value in v.IterToValueType(value_type):\n yield (k, ) + nested_value",
"def __iter__(self):\n element = self\n\n while element.HasField(\"pathtype\"):\n yield element\n\n if element.HasField(\"nested_path\"):\n element = element.nested_path\n else:\n break",
"def explore_type(name, datatype, is_child):\n type_code = datatype.code\n if type_code in Explorer.type_code_to_explorer_map:\n explorer_class = Explorer.type_code_to_explorer_map[type_code]\n while explorer_class.explore_type(name, datatype, is_child):\n pass\n else:\n print (\"Explorer for type '%s' not yet available.\\n\" %\n str(datatype))",
"def get_fields_of_type(df, data_type='StructType'):\n return [f for f in df.schema.fields if str(f.dataType).find(data_type) > -1]",
"def resolve_type(type_path, builder):\n namespaces = get_parent_namespaces(builder)\n for i in reversed(range(len(namespaces) + 1)):\n full_type = \".\".join(namespaces[0:i] + [ type_path ])\n if full_type in field_types:\n return full_type\n\n raise RuntimeError(\"Cannot resolve field type.\")",
"def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and not text.startswith('struct'):\n self.pointer_types.append(type_name)",
"def get_nested_node(cursor: Cursor) -> Cursor:\n if cursor.kind in (\n cindex.CursorKind.TYPEDEF_DECL,\n cindex.CursorKind.FIELD_DECL,\n cindex.CursorKind.VAR_DECL,\n ):\n try:\n underlying_node = next(cursor.get_children())\n if underlying_node.kind in (\n cindex.CursorKind.STRUCT_DECL,\n cindex.CursorKind.UNION_DECL,\n cindex.CursorKind.ENUM_DECL,\n ):\n return underlying_node\n except StopIteration:\n # No children for typedefs of native types, i.e. `typedef int some_int;`\n pass\n\n return cursor",
"def _parse_types(self, die):\n if die.offset in self._visited_die_offset:\n return\n else:\n self._visited_die_offset.append(die.offset)\n\n if die.tag == \"DW_TAG_base_type\":\n self._parse_base_type(die)\n\n elif die.tag == \"DW_TAG_const_type\":\n self._parse_const_type(die)\n\n elif die.tag == \"DW_TAG_volatile_type\":\n self._parse_volatile_type(die)\n\n elif die.tag == \"DW_TAG_typedef\":\n self._parse_typedef(die)\n\n elif die.tag == \"DW_TAG_pointer_type\":\n self._parse_pointer_type(die)\n\n elif die.tag == \"DW_TAG_array_type\":\n self._parse_array_type(die)\n\n elif die.tag == \"DW_TAG_enumeration_type\":\n self._parse_enums_type(die)\n\n # union and class are not implemented yet, use structure.\n elif die.tag == \"DW_TAG_structure_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_union_type\":\n self._parse_structure_type(die)\n elif die.tag == \"DW_TAG_class_type\":\n self._parse_structure_type(die)\n\n elif die.tag == \"DW_TAG_subroutine_type\":\n self._parse_subroutine_type(die)\n\n else:\n ...\n\n if die.tag == \"DW_TAG_compile_unit\":\n return\n\n # if has children, iter them, except DW_TAG_compile_unit.\n for child_die in die.iter_children():\n self._parse_types(child_die)",
"def generic_visit(self, node):\n for field in node._fields:\n try:\n value = getattr(node, field)\n except AttributeError:\n continue\n if isinstance(value, list):\n for item in value:\n if isinstance(item, ast.AST):\n self.visit(item)\n elif isinstance(value, ast.AST):\n self.visit(value)",
"def explore_type(name, datatype, is_child):\n actual_type = datatype.strip_typedefs()\n if is_child:\n print (\"The type of %s is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n else:\n print (\"The type '%s' is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n\n Explorer.explore_type(name, actual_type, is_child)\n return False",
"def test_type_builder_handles_nested_properties():\n schema = [\n SchemaObject(\n name=\"ClassWithNestedClass\",\n properties=[\n SchemaObject(\n name=\"nestedValue\",\n properties=[\n SchemaValue(name=\"string_value\", value_type=\"string\"),\n SchemaEnum(\n name=\"enum_value\",\n value_type=\"string\",\n values=[\"hey\", \"new\", \"value\"],\n ),\n ],\n ),\n ],\n )\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 3\n assert build_result[0] == ClassDefinition(\n name=\"ClassWithNestedClass\",\n properties=[\n PropertyDefinition(\n name=\"nested_value\",\n key=\"nestedValue\",\n value_type=\"ClassWithNestedClassNestedValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValue\"},\n )\n assert build_result[1] == ClassDefinition(\n name=\"ClassWithNestedClassNestedValue\",\n properties=[\n PropertyDefinition(\n name=\"string_value\",\n key=\"string_value\",\n value_type=\"str\",\n known_type=True,\n ),\n PropertyDefinition(\n name=\"enum_value\",\n key=\"enum_value\",\n value_type=\"ClassWithNestedClassNestedValueEnumValue\",\n known_type=False,\n ),\n ],\n depends_on={\"ClassWithNestedClassNestedValueEnumValue\"},\n )\n assert build_result[2] == EnumDefinition(\n name=\"ClassWithNestedClassNestedValueEnumValue\",\n values=[(\"HEY\", \"hey\"), (\"NEW\", \"new\"), (\"VALUE\", \"value\")],\n depends_on=set(),\n )",
"def test_get_type_for_key_path_multi_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.Beneficiary.Name\")\n == \"string\"\n )",
"def getFileColsAsTypedRecords(dirPath, columns, types, delim=\",\"):\n\t(dtypes, cvalues) = extractTypesFromString(types)\t\n\ttdata = list()\n\tfor rec in fileSelFieldsRecGen(dirPath, columns, delim):\n\t\ttrec = list()\n\t\tfor indx, value in enumerate(rec):\n\t\t\ttindx = columns[indx]\n\t\t\tvalue = __convToTyped(tindx, value, dtypes)\n\t\t\ttrec.append(value)\n\t\ttdata.append(trec)\n\treturn tdata",
"def __dfs(self, subtree, path):\n if isinstance(subtree, list):\n for node in subtree:\n for child in self.__dfs(node, path + \"[\" + str(subtree.index(node)) + \"]\"):\n yield child\n elif isinstance(subtree, dict):\n for node in subtree:\n for child in self.__dfs(subtree[node], path + \"/\" + node):\n yield child\n else: # Leaf node\n yield (subtree, path)",
"def visit_types(types: Iterable[UnresolvedType], visited=None):\n visited = visited or set()\n\n for type_ in (t for t in types if t not in visited):\n if isinstance(type_, DeferredType):\n continue\n yield type_\n visited.add(type_)\n next_types = (t for t in chain(\n [field.type_ for field in type_.fields],\n [field.type_ for field in type_.input_fields],\n type_.interfaces,\n type_.possible_types,\n [type_.of_type] if type_.of_type else [],\n ) if t not in visited)\n yield from visit_types(next_types, visited)",
"def _validate_type_recursively(\n mapping: Mapping[str, Any], ref: str, types: Set[str],\n depth: int) -> Optional[SchemaError]:\n # pylint: disable=too-many-return-statements\n # pylint: disable=too-many-branches\n\n if depth == 0:\n # Enforce the type identifier and the description\n # at the top of the definition\n if isinstance(mapping, collections.OrderedDict):\n keys = list(mapping.keys())\n if keys[0] != 'type':\n return SchemaError(\n message=(\n \"Expected 'type' at the top of the definition, \"\n \"but got {}\").format(keys[0]),\n ref='{}/type'.format(ref))\n\n if 'description' in mapping and keys[1] != 'description':\n return SchemaError(\n message=(\n \"Expected 'description' just after 'type' \"\n \"in the definition, but got {}\").format(keys[1]),\n ref='{}/description'.format(ref))\n\n if mapping['type'] not in types:\n return SchemaError(\n message=\"Invalid type: {}\".format(mapping['type']),\n ref='{}/type'.format(ref))\n\n # Validate against the type schema\n if mapping['type'] in mapry.schemas.TYPE_TO_SCHEMA:\n type_schema = mapry.schemas.TYPE_TO_SCHEMA[mapping['type']]\n scherr = _validate_type_against_schema(\n mapping=mapping, ref=ref, json_schema=type_schema)\n if scherr is not None:\n return scherr\n\n # Collect expected mapping keys;\n # the default 'type' and 'description' always apply.\n expected_keys = {\"type\", \"description\"}\n\n # If this is a property type definition (and not a nested type definition)\n # expect all the property keys.\n if depth == 0:\n expected_keys.update(\n mapry.schemas.GRAPH[\"definitions\"][\"Property\"] # type: ignore\n [\"properties\"].keys())\n\n # If this is a non-composite type, add extra type-specific expected keys.\n if mapping['type'] in mapry.schemas.TYPE_TO_SCHEMA:\n type_schema = mapry.schemas.TYPE_TO_SCHEMA[mapping['type']]\n expected_keys.update(type_schema[\"properties\"].keys()) # type: ignore\n\n for key in mapping:\n if key not in expected_keys:\n return SchemaError(\n message=(\n \"Additional properties are not allowed \"\n \"({!r} was unexpected)\").format(key),\n ref=ref)\n\n # Validate logic\n if mapping['type'] == 'boolean':\n return None # no logical checks for booleans\n\n if mapping['type'] == 'integer':\n return _validate_integer(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'float':\n return _validate_float(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'string':\n return _validate_string(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'path':\n return _validate_path(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'date':\n return _validate_date(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'time':\n return _validate_time(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'datetime':\n return _validate_datetime(mapping=mapping, ref=ref)\n\n if mapping['type'] == 'duration':\n return None # no logical checks for durations\n\n if mapping['type'] == 'time_zone':\n return None # no logical checks for time_zones\n\n if mapping['type'] == 'array':\n return _validate_array(\n mapping=mapping, ref=ref, types=types, depth=depth)\n\n if mapping['type'] == 'map':\n return _validate_map(mapping=mapping, ref=ref, types=types, depth=depth)\n\n if mapping['type'] in _NONCOMPOSITE_TYPE_SET:\n raise AssertionError(\"Unhandled type: {}\".format(mapping['type']))\n\n # It's a composite; composite types lack \"type\" property.\n return None",
"def type_hierarchy(self):\n\t\treturn self._node.type_hierarchy",
"def explore_expr(expr, value, is_child):\n datatype = value.type\n type_code = datatype.code\n fields = datatype.fields()\n\n if type_code == gdb.TYPE_CODE_STRUCT:\n type_desc = \"struct/class\"\n else:\n type_desc = \"union\"\n\n if CompoundExplorer._get_real_field_count(fields) == 0:\n print (\"The value of '%s' is a %s of type '%s' with no fields.\" %\n (expr, type_desc, str(value.type)))\n if is_child:\n Explorer.return_to_parent_value_prompt()\n return False\n\n print (\"The value of '%s' is a %s of type '%s' with the following \"\n \"fields:\\n\" % (expr, type_desc, str(value.type)))\n\n has_explorable_fields = False\n choice_to_compound_field_map = { }\n current_choice = 0\n print_list = [ ]\n for field in fields:\n if field.artificial:\n continue\n field_full_name = Explorer.guard_expr(expr) + \".\" + field.name\n if field.is_base_class:\n field_value = value.cast(field.type)\n else:\n field_value = value[field.name]\n literal_value = \"\"\n if type_code == gdb.TYPE_CODE_UNION:\n literal_value = (\"<Enter %d to explore this field of type \"\n \"'%s'>\" % (current_choice, str(field.type)))\n has_explorable_fields = True\n else:\n if Explorer.is_scalar_type(field.type):\n literal_value = (\"%s .. (Value of type '%s')\" %\n (str(field_value), str(field.type)))\n else:\n if field.is_base_class:\n field_desc = \"base class\"\n else:\n field_desc = \"field\"\n literal_value = (\"<Enter %d to explore this %s of type \"\n \"'%s'>\" %\n (current_choice, field_desc,\n str(field.type)))\n has_explorable_fields = True\n\n choice_to_compound_field_map[str(current_choice)] = (\n field_full_name, field_value)\n current_choice = current_choice + 1\n\n print_list.append((field.name, literal_value))\n\n CompoundExplorer._print_fields(print_list)\n print (\"\")\n\n if has_explorable_fields:\n choice = raw_input(\"Enter the field number of choice: \")\n if choice in choice_to_compound_field_map:\n Explorer.explore_expr(choice_to_compound_field_map[choice][0],\n choice_to_compound_field_map[choice][1],\n True)\n return True\n else:\n if is_child:\n Explorer.return_to_parent_value()\n else:\n if is_child:\n Explorer.return_to_parent_value_prompt()\n\n return False",
"def find_typerefs(node, typename):\n if node.kind.is_reference():\n ref_node = node.referenced \n if ref_node.spelling == typename:\n print 'Found ref %s [line=%s, col=%s]' % (\n typename, node.location.line, node.location.column)\n else:\n print 'Found ? %s %s [line=%s, col=%s]' % (node.kind, node.spelling, node.location.line, node.location.column)\n # Recurse for children of this node\n for c in node.get_children():\n find_typerefs(c, typename)",
"def unpack_children(stream, template):\n cols = list(template.children()) or [template]\n\n out = []\n for col in cols:\n # sequences and other structures\n if isinstance(col, pydap.model.SequenceType):\n out.append(pydap.handlers.lib.IterData(list(unpack_sequence(stream, col)), col))\n elif isinstance(col, pydap.model.StructureType):\n out.append(tuple(unpack_children(stream, col)))\n\n # unpack arrays\n else:\n out.extend(convert_stream_to_list(stream, col.dtype, col.shape, col.id))\n return out",
"def preorder_types(type_signature: computation_types.Type):\n yield type_signature\n for child in type_signature.children():\n yield from preorder_types(child)"
]
| [
"0.67806774",
"0.5649272",
"0.52802294",
"0.5041527",
"0.5004218",
"0.50018305",
"0.49943122",
"0.4993439",
"0.4964379",
"0.49569297",
"0.4941945",
"0.4900904",
"0.48964566",
"0.48888087",
"0.48849034",
"0.48633215",
"0.4858233",
"0.48521087",
"0.48463467",
"0.48446807",
"0.47851756",
"0.47351643",
"0.47173703",
"0.46909496",
"0.4676545",
"0.46486452",
"0.46443257",
"0.4610752",
"0.45992082",
"0.45876607"
]
| 0.7417042 | 0 |
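The record above walks nested Arrow types step by step along a column path. Below is a rough, assumption-laden sketch of the same idea — not the tfx_bsl helper itself: it needs only pyarrow and uses a plain list of field names in place of `path.ColumnPath`.

```python
import pyarrow as pa


def walk_types(schema: pa.Schema, steps):
    """Yield the Arrow type at each level while following `steps` (a list of field names)."""
    arrow_type = schema.field(steps[0]).type
    yield arrow_type
    for step in steps[1:]:
        # A list-like level contributes its value type before the next named step.
        while pa.types.is_list(arrow_type) or pa.types.is_large_list(arrow_type):
            arrow_type = arrow_type.value_type
            yield arrow_type
        # A struct level is descended by field name.
        arrow_type = arrow_type.field(step).type
        yield arrow_type


schema = pa.schema([("sessions", pa.list_(pa.struct([("event_id", pa.int64())])))])
for t in walk_types(schema, ["sessions", "event_id"]):
    print(t)  # the list type, then struct<event_id: int64>, then int64
```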
Returns a function that converts a StringArray to BinaryArray. | def _GetConvertToBinaryFn(
array_type: pa.DataType) -> Optional[Callable[[pa.Array], pa.Array]]:
if pa.types.is_string(array_type):
return lambda array: array.view(pa.binary())
if pa.types.is_large_string(array_type):
return lambda array: array.view(pa.large_binary())
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tobinary_multiples(arr):\n return [np.array(arr_i).tobytes() for arr_i in arr]",
"def get_string_binary(string):\r\n string_binary_array = []\r\n\r\n # Create array of binaries from the string\r\n for character in string:\r\n string_binary_array.append(get_binary(character))\r\n\r\n # Combine those binaries into one long binary\r\n string_binary = \"\".join(string_binary_array)\r\n\r\n return string_binary",
"def str_to_bin(string):\n ret = list(string)\n # convert to binary representation\n ret = ['{:07b}'.format(ord(x)) for x in ret]\n # split the binary into\n ret = [[bit for bit in x] for x in ret]\n # flatten it and convert to integers\n ret = [int(bit) for sublist in ret for bit in sublist]\n return ret",
"def binary(message: str) -> bitarray:\n binary_message = bitarray()\n byte_message = bytes(message, encoding=\"ascii\")\n binary_message.frombytes(byte_message)\n return binary_message",
"def np2bv(int_arr, n_bits=8):\n # Step 1: Turn ndarray into a list of integers\n int_list = int_arr.tolist()\n\n # Step 2: Format each number as two's complement strings\n binarized = [format(x & 2 ** n_bits - 1, f'0{n_bits}b') if x < 0 else\n format(x, f'0{n_bits}b')\n for x in int_list]\n\n # Step 3: Join all strings into one large binary string\n bin_string = ''.join(binarized)\n\n # Step 4: Convert to cocotb BinaryValue and return\n return BinaryValue(bin_string)",
"def tobinary(ids):\n return np.array(ids).tobytes()",
"def string_to_bit_array(text):\n array = list()\n for char in text:\n bin_val = bin_value(char, 8) # Get value of char in one byte\n array.extend([int(x) for x in list(bin_val)]) # Add the bits to the list\n return array",
"def get_binary(string):\r\n # Use special logic for NULL_STRING to avoid errors\r\n if string == NULL_STRING:\r\n return \"00000000\"\r\n # Otherwise, gives the binary representation of UTF-8 characters\r\n return \"\".join(\"{:08b}\".format(d) for d in bytearray(string, \"utf-8\"))",
"def _str_to_binary_string(string: str) -> str:\n binary_string = \"\"\n for char in string:\n ascii_code = ord(char)\n binary_string += format(ascii_code, \"08b\")\n\n if binary_string:\n return binary_string\n else:\n raise ValueError(\"Error converting message to binary\")",
"def convertToStringArray(booleanArray: typing.List[bool]) -> typing.List[str]:\n ...",
"def to_bin(arr, bytelen=8):\n result = \"\"\n for c in arr:\n result += (\"0\" * bytelen + bin(c)[2:])[-bytelen:]\n return result",
"def str_to_numpy(string_array):\n if pd.isnull(string_array):\n return(np.NaN)\n else:\n return np.array(ast.literal_eval(string_array))",
"def bit_string_to_bytearray(bit_string):\n ret = []\n for idx in range(int(len(bit_string) / 8)):\n v = 0\n for idx0, bit in enumerate(bit_string[idx * 8:idx * 8 + 8]):\n v = v | (bit << (7 - idx0))\n ret.append(v)\n return bytearray(ret)",
"def convert_bytearray(func):\n def wrapped(ber, *args, **kwargs):\n return func(bytearray(ber), *args, **kwargs)\n return wrapped",
"def from_bit_array(bin_list):\n print(bin_list)\n byte_list = [hex(int(x,2)) for x in bin_list]\n result = bytes([int(x,0) for x in byte_list])\n return result",
"def str_to_byn(str_):\n\n return ' '.join(bin(byte).lstrip('0b') for Item in str_ for byte in Item.encode())",
"def bitarray_to_data(bits):\n return np.fromstring(bits,dtype = np.int32)",
"def string_to_bit_array(text_string: str) -> list:\n\n array = list()\n for char in text_string:\n # Get the char value on one byte\n bin_val = Des.bin_value(char, 8)\n # Add the bits to the final list\n array.extend([int(x) for x in list(bin_val)])\n return array",
"def base64_decode_array(inStr, dtype):\n return np.frombuffer(base64.decodestring(inStr), dtype=dtype)",
"def convertToByteArray(booleanArray: typing.List[bool]) -> typing.List[int]:\n ...",
"def to_bin(data):\n if isinstance(data, str):\n return ''.join([ format(ord(i), \"08b\") for i in data ])\n elif isinstance(data, bytes) or isinstance(data, np.ndarray):\n return [ format(i, \"08b\") for i in data ]\n elif isinstance(data, int) or isinstance(data, np.uint8):\n return format(data, \"08b\")\n else:\n raise TypeError(\"Type not supported.\")",
"def to_bin(data):\r\n if isinstance(data, str):\r\n return ''.join([ format(ord(i), \"08b\") for i in data ])\r\n elif isinstance(data, bytes) or isinstance(data, np.ndarray):\r\n return [ format(i, \"08b\") for i in data ]\r\n elif isinstance(data, int) or isinstance(data, np.uint8):\r\n return format(data, \"08b\")\r\n else:\r\n raise TypeError(\"Type not supported.\")",
"def octet_string_to_bytearray(octet_string):\n ret = []\n for ch in octet_string:\n ret.append(ch)\n return bytearray(ret)",
"def _to_bytes_or_str_array(result, output_dtype_like=None):\n ret = numpy.asarray(result.tolist())\n dtype = getattr(output_dtype_like, 'dtype', None)\n if dtype is not None:\n return ret.astype(type(dtype)(_get_num_chars(ret)), copy=False)\n return ret",
"def string_to_array(arg):\n\n res = arg.replace('[', '').replace(']', '').replace(',', '')\n return np.array(res.split(' '), dtype=np.int8)",
"def to_bytearray(x):\n if isinstance(x, bytearray):\n return x\n else:\n return bytearray(x)",
"def a2b(a):\n return binascii.unhexlify(a)",
"def stringToUtf8Array(input):\n if _haveTypeUnicode:\n # Assume this is Python 2.\n if type(input) is str:\n # Convert the raw string to an int array.\n return map(ord, input)\n elif type(input) is unicode:\n # In Python 2, the result of encode is a str, so convert to int array.\n return map(ord, input.encode('utf-8'))\n else:\n return input\n else:\n if type(input) is str:\n return input.encode('utf-8')\n else:\n return input",
"def arrayify(possible_array):\n if isinstance(possible_array, basestring):\n return [possible_array]\n return possible_array",
"def ascii_to_binary(string):\r\n\tbin_string = \"\"\r\n\tfor i in range(0,len(string)):\r\n\t\tbin_string += conversions.decimal_to_binary(search(alphabet, string[i])+32)\r\n\treturn bin_string"
]
| [
"0.652799",
"0.6486571",
"0.6189265",
"0.61772597",
"0.60881764",
"0.6065052",
"0.59320545",
"0.58606786",
"0.58524144",
"0.58404696",
"0.5823679",
"0.5797087",
"0.57892406",
"0.5775931",
"0.5766405",
"0.5760254",
"0.5730116",
"0.57267714",
"0.5702361",
"0.56939006",
"0.56433505",
"0.5640323",
"0.56296486",
"0.55954266",
"0.55820376",
"0.5558665",
"0.5506338",
"0.55018854",
"0.54949003",
"0.5447599"
]
| 0.71034527 | 0 |
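A minimal sketch of what the converter returned by the record above does, assuming pyarrow is installed; `Array.view` reinterprets the existing buffers without copying, which is why string and large_string arrays map cleanly onto binary and large_binary.

```python
import pyarrow as pa

arr = pa.array(["foo", "bar"], type=pa.string())
as_binary = arr.view(pa.binary())            # zero-copy reinterpretation of the same buffers
print(as_binary.type)                        # binary

large = pa.array(["baz"], type=pa.large_string())
print(large.view(pa.large_binary()).type)    # large_binary
```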
Creates and chains the map and reduce pools, returning the map pool. | def create_pools(finalize, reduce_size=-1):
# Create the reduce pool
LOGGER.debug("Creating reduce pool")
reduce_pool = RedPool(reduce_task)
# Set attributes
reduce_pool.on_done = finalize
if reduce_size > 1:
reduce_pool.group_size = reduce_size
# Create the map pool
LOGGER.debug("Creating map pool")
map_pool = MapPool(map_task, reduce_pool)
return map_pool | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_pool(self):\n self._pool = [0] * N\n if isinstance(self._bases, dict):\n for base, num in self._bases.items():\n self._pool[to_code(base)] += num\n elif isinstance(self._bases, list):\n for base in self._bases:\n self._pool[to_code(base)] += 1",
"def pool_combine(program_path='.', seq='999'):\n from os.path import dirname\n from subprocess import call\n\n new_pool = None\n for pool in get_pools(program_path=program_path, latest=False):\n if new_pool is None:\n path = dirname(pool)\n new_pool = open('/'.join([path, make_poolname(pool, seq=seq)]), 'w')\n call(['head', '-n', '1', pool], stdout=new_pool)\n call(['tail', '-n', '+2', pool], stdout=new_pool)\n new_pool.close()",
"def create_omap(pool_fun, pool_configs, omap_configs):\n\n for entry in pool_configs.values():\n if not pool_fun.fill_omap_entries(pool_name=entry[\"pool_name\"], **omap_configs):\n log.error(f\"Omap entries not generated on pool {entry['pool_name']}\")\n return 1",
"def pool_job(self, func, inputs):\n\n if self.flag_use_mp:\n output = zip(*self._pool.map(func, inputs))\n self._consolidate_mp_logs()\n else:\n logger.info(\"Performing task serially\")\n output = self.serial_job(func, inputs)\n\n return output",
"def _repopulate_pool(self):\n for i in range(self._processes - len(self._pool)):\n w = self.Process(target=worker,\n args=(self._inqueue, self._outqueue,\n self._initializer,\n self._initargs, self._maxtasksperchild,\n self._wrap_exception,\n self._finalizer,\n self._finalargs)\n )\n self._pool.append(w)\n w.name = w.name.replace('Process', 'PoolWorker')\n w.daemon = True\n w.start()\n util.debug('added worker')",
"def reduce_run():",
"def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()",
"def Allreduce4Group2(net, blobs, reduced_affix, gpu_indices):\n a, b, c, d = blobs\n gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices\n # a_reduced <- a+b, c_reduced <- c + d\n a_reduced = net.Add(\n [a, b],\n str(a) + reduced_affix,\n device_option=OnGPU(gpu_a)\n )\n c_reduced = net.Add(\n [c, d],\n str(c) + reduced_affix,\n device_option=OnGPU(gpu_c)\n )\n # copy from c_reduce(gpu_c) to c_reduce_copy(gpu_a)\n c_reduced_copy = c_reduced.Copy(\n [],\n str(c_reduced) + '_copy',\n device_option=OnGPU(gpu_a)\n )\n # a_reduced <- a_reduced + c_reduced_copy\n a_reduced = a_reduced.Add(c_reduced_copy, a_reduced, device_option=OnGPU(gpu_a))\n # broadcast a_reduced to c_reduced\n c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))\n # broadcast to b and d\n b_reduced = a_reduced.Copy(\n [],\n str(b) + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n d_reduced = c_reduced.Copy(\n [],\n str(d) + reduced_affix,\n device_option=OnGPU(gpu_d)\n )\n return a_reduced, b_reduced, c_reduced, d_reduced",
"def _create_graph(self, pools: List[Pool]):\n for pool in pools:\n self._add_nodes(pool.tokens)\n\n for pool in pools: # noqa: WPS440,WPS441\n self._add_edges(pool) # noqa: WPS441",
"def pfmap(func, workers=8):\n return fmap(func)",
"def construct(self, x):\n results = []\n x = self.pool0(x)\n results.append(x)\n x = self.pool1(x)\n results.append(x)\n x = self.pool2(x)\n results.append(x)\n x = self.pool3(x)\n results.append(x)\n return results",
"def train_batch_create_mp(imagedirs, classes, indices, image_key, offset_percent, output_size, nprocesses):\r\n batch_size = len(indices)\r\n n_classes = len(classes)\r\n # now create the output class and pixel arrays\r\n output_array = np.zeros((batch_size, output_size[0], output_size[1], output_size[2]), dtype=np.float32)\r\n class_array = np.zeros((batch_size, n_classes), dtype=np.int8)\r\n batch_data = [image_key[i] for i in indices]\r\n whole_minibatch_size = batch_size // nprocesses\r\n num_whole_minibatches = batch_size // whole_minibatch_size\r\n input_list = []\r\n for i in range(num_whole_minibatches):\r\n input_list.append(batch_data[whole_minibatch_size*i:whole_minibatch_size*(1+i)])\r\n if batch_size % nprocesses != 0:\r\n input_list.append(batch_data[whole_minibatch_size*num_whole_minibatches:])\r\n frozen_params = (imagedirs, classes, offset_percent, output_size)\r\n partial_worker = partial(batch_worker, frozen_params=frozen_params)\r\n # initializes the pool of processes\r\n print('building pool')\r\n pool = multiprocessing.Pool(nprocesses)\r\n # maps partial_worker and list of stars to the pool, stores used parameters in a list\r\n print('mapping pool')\r\n outputs = pool.map(partial_worker, input_list)\r\n # end the list of functions to go to pool\r\n pool.close()\r\n print('pool closed')\r\n # wait for all processes to return\r\n pool.join()\r\n print('pool joined')\r\n counter = 0\r\n for i in range(len(outputs)):\r\n current_output = outputs[i]\r\n pixel_data = current_output[0]\r\n class_data = current_output[1]\r\n num_fish = len(pixel_data)\r\n for lf in range(num_fish):\r\n output_array[counter, :, :, :] = np.reshape(pixel_data[lf], output_size)\r\n class_array[counter, :] = class_data[lf]\r\n counter += 1\r\n return output_array, class_array",
"def create_input_multiprocess(ids):\n\n threads = 8 \n p = mp.Pool(threads)\n pool_results = p.map(create_input, np.array_split(ids, threads * 2))\n p.close()\n p.join()\n\n ret = pd.concat(pool_results)\n\n pool_results = None\n gc.collect()\n\n return ret",
"def add_to_pool(self):\n if self.check_pool():\n for func in self.getter._func:\n proxies = self.getter.get_proxies(func)\n for proxy in proxies:\n self.conn.push_to_right(proxy)\n else:\n print('Pool reached max capacity')",
"def reduce(self, app, nodes, result):",
"def build(self, input_tensors, is_training, lengths=None, hparams=None):\n input_tensor = input_tensors[-1]\n if input_tensor.get_shape().as_list()[2] < self._pool_size:\n return input_tensors\n\n max_pool = tf_slim.max_pool2d(\n input_tensor,\n self._pool_size,\n stride=self._strides,\n padding='same',\n )\n return input_tensors + [max_pool]",
"def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result",
"def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, None, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n\n return pool_fn",
"def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, self.random_seed, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn",
"def rerank_mp(x2ys, x2cnt, x2xs, width, n_trans, num_workers):\n from multiprocessing import Pool\n\n shared_inputs = x2ys, x2cnt, x2xs, width, n_trans\n print(f\"Entering multiprocessing with {num_workers} workers...\"\n f\" (#words={len(x2ys)})\")\n with Pool(num_workers) as p:\n x2ys_cpe = dict(p.starmap(\n _rerank_mp,\n zip(x2ys.items(), it.repeat(shared_inputs)),\n ))\n return x2ys_cpe",
"def get_pools():\n poolinfostr = fork_and_get_output(\"zpool list -H -o all\".split())\n header = get_zpool_header()\n poolinfo = poolinfostr.splitlines()\n poolobjs = []\n for poolstr in poolinfo:\n poolobjs.append(DataZFS(poolstr, header, 'pool'))\n return poolobjs",
"def create_image_db():\n logging.info('=============> create_image_db: create image metadata json mapper file <===========')\n load_all_map_dir(manifest_map_dir, layer_json_map_dir, layer_config_map_dir)\n print \"create pool\"\n P = multiprocessing.Pool(60)\n print \"before map!\"\n print len(manifest_names) #process_manifest\n print len(layer_json_map_dir)\n print \"before map!\"\n #json_datas = []\n #for i in manifest_names:\n # json_datas.append(process_manifest(i))\n json_datas = P.map(process_manifest, manifest_names)\n print \"after map\"\n print \"write to files!\"\n write_json_datas(json_datas)",
"def _pool(\n pool_mode,\n nd_util,\n input,\n kernel_size,\n stride=1,\n padding=0,\n ceil_mode=False,\n):\n return FunctionLib.apply(\n 'Pool',\n input.device,\n [input],\n kernel_shape=nd_util(kernel_size),\n strides=nd_util(stride),\n pads=nd_util(padding),\n mode=pool_mode,\n ceil_mode=ceil_mode,\n )",
"def main():\n pool = Pool(processes=50)\n results = pool.imap_unordered(experiment, range(50), chunksize=1)\n\n # Output\n offset = 1\n # for i, (data_surv, data_order, data_ctrl) in enumerate(results):\n for i, (data_surv, data_ctrl) in enumerate(results):\n with open(f'../data/reproductive_barrier/hybrid_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_surv:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n with open(f'../data/reproductive_barrier/order_of_incompatibility/experiment_{i+offset}.csv', 'w') as fp:\n for x in data_order:\n fp.write('%d,' % int(x[0]) + ','.join(map(str, x[1:])) + '\\n')\n\n with open(f'../data/reproductive_barrier/control_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_ctrl:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n return",
"def test_map_reduce(self):\n outfile = cStringIO.StringIO()\n var_order = [2,1,0]\n\n \n # Write out in the order 2, 1, 0. In a normal program those constants\n # would come from an enum indicating the order in which the fields\n # appear in schema\n aggregator = lra.LineRawHandleAggregator(outfile, var_order = var_order)\n\n return_val_1 = aggregator.map({0: 'world', 1: 'there', 2: 'hello'})\n return_val_2 = aggregator.map({0: 'good', 1: 'is', 2: 'this'})\n\n self.assertIsNone(return_val_1)\n self.assertIsNone(return_val_2)\n self.assertEqual(outfile.getvalue(),\n 'INSERT\\nhello\\nthere\\nworld\\nENDINSERT\\n'\n 'INSERT\\nthis\\nis\\ngood\\nENDINSERT\\n')\n\n reduce_val = aggregator.reduce(return_val_1, return_val_2)\n self.assertIsNone(reduce_val)",
"def mlp_pool(vecs, names, hid_dim):\n if len(names) == 1:\n if names[0] == \"prefix_att\":\n pool = vecs[-1] #no raw\n elif names[0] == \"cross_att\":\n pool = vecs[-1] #no raw\n elif names[0] == \"concat_att\":\n #no raw\n if len(vecs) == 6:\n pool = fluid.layers.concat(vecs[3:5], axis=1)\n else: \n pool = fluid.layers.concat(vecs[2:4], axis=1)\n else:\n pool = fluid.layers.concat(vecs, axis=1)\n #pool = vecs[0] + vecs[1] + ...\n mlp_vec = fluid.layers.fc(input=pool, size=hid_dim * 2, act=\"leaky_relu\",\n param_attr=fluid.ParamAttr(name='%s_fc_weight' % names[0]),\n bias_attr=fluid.ParamAttr(name='%s_fc_bias' % names[0]))\n else:\n pools = []\n for idx, v in enumerate(vecs):\n vec = fluid.layers.fc(input=v, size=hid_dim, act=\"leaky_relu\",\n param_attr=fluid.ParamAttr(name='%s_fc_weight' % names[idx]),\n bias_attr=fluid.ParamAttr(name='%s_fc_bias' % names[idx]))\n pools.append(vec)\n if len(pools) > 2 and len(pools) % 2 == 0:\n merge_pools = []\n for idx in range(len(pools) / 2):\n v = fluid.layers.concat([pools[idx], pools[idx + len(pools) / 2]], axis=1)\n vec = fluid.layers.fc(input=v, size=hid_dim, act=\"leaky_relu\",\n param_attr=fluid.ParamAttr(name='%s_fc_weight' % names[idx].split('_')[0]),\n bias_attr=fluid.ParamAttr(name='%s_fc_bias' % names[idx].split('_')[0]))\n merge_pools.append(vec)\n pools = merge_pools\n\n mlp_vec = fluid.layers.concat(pools, axis=1)\n return mlp_vec",
"def map_summarize(self):\n if self.aligner == \"hisat2\":\n build([hisat2.SummarizeHisatMap(fastq_dic=self.fastq_dic,\n workdir=self.workdir,\n indexfile=self.hisat_index,\n num_cpus=self.num_cpus,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler, workers=1)\n elif self.aligner in [\"STAR\", \"star\"]:\n build([star.SummarizeStarMap(fastq_dic=self.fastq_dic,\n workdir=self.workdir,\n stardb_dir=self.stardb_dir,\n num_cpus=self.num_cpus)],\n local_scheduler=self.local_scheduler, workers=1)",
"def combine(self, states, tasks):\n self._assert_is_batched(states, tasks)\n return self._tf_call(self._combine, states, tasks)",
"def __reduce__(self):\r\n # We need to remove 'joblib' from the end of cachedir\r\n cachedir = self.cachedir[:-7] if self.cachedir is not None else None\r\n return (self.__class__, (cachedir,\r\n self.mmap_mode, self.compress, self._verbose))",
"def generate_aggregates(self) -> None:\n self.create_count_map()\n self.create_total_count()\n self.create_n_1_gram_map()"
]
| [
"0.5690839",
"0.5648045",
"0.5546011",
"0.5538413",
"0.55374104",
"0.54235184",
"0.5396628",
"0.53945255",
"0.53736633",
"0.5349251",
"0.53477794",
"0.53184974",
"0.5250934",
"0.5247123",
"0.52167875",
"0.5189451",
"0.517974",
"0.51670843",
"0.51543236",
"0.5147",
"0.51386416",
"0.5124568",
"0.512286",
"0.5108598",
"0.510549",
"0.50926805",
"0.5084432",
"0.5079132",
"0.50713956",
"0.5055952"
]
| 0.7103352 | 0 |
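The `create_pools` record wires a map stage into a reduce stage. The toy classes below are hypothetical stand-ins — not the RedPool/MapPool library from the record, whose internals are not shown — that only illustrate the same chaining pattern: mapped items flow into a reducer that groups them and invokes a finalize callback.

```python
# Hypothetical stand-ins for illustration only; the real RedPool/MapPool API may differ.
from functools import reduce


class ToyReducePool:
    def __init__(self, reduce_fn, group_size=2, on_done=print):
        self.reduce_fn = reduce_fn
        self.group_size = group_size
        self.on_done = on_done
        self._buffer = []

    def submit(self, item):
        self._buffer.append(item)
        if len(self._buffer) >= self.group_size:
            self.on_done(reduce(self.reduce_fn, self._buffer))
            self._buffer = []


class ToyMapPool:
    def __init__(self, map_fn, downstream):
        self.map_fn = map_fn
        self.downstream = downstream

    def submit(self, item):
        self.downstream.submit(self.map_fn(item))


reducer = ToyReducePool(lambda a, b: a + b, group_size=4, on_done=print)
mapper = ToyMapPool(lambda x: x * x, reducer)
for n in range(8):
    mapper.submit(n)   # prints 14 (0+1+4+9) and then 126 (16+25+36+49)
```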
Test whether the parser can parse a pipe | def test_pipe():
parser = CmdParser([posandtwo, valprog])
out = parser.parse("posandtwo | valprog")
assert isinstance(out[0], ProgramNode)
assert out[0].program_desc == posandtwo
assert isinstance(out[1], PipeNode)
assert isinstance(out[2], ProgramNode)
assert out[2].program_desc == valprog
assert isinstance(out[3], EndOfCommandNode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pipe2():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog | posandtwo\")\n assert isinstance(out[0], ProgramNode)\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert isinstance(out[3], PipeNode)\n assert isinstance(out[4], ProgramNode)\n assert isinstance(out[5], EndOfCommandNode)",
"def eof(self):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = pservlet.pipe_eof(self._pipe_desc)\n\t\tif result > 0: return True\n\t\telif result == 0: return False\n\t\traise PlumberExceptions.PlumberNativeException(\"Cannot finish the API call to pipe_eof\")",
"def test_parse_simple_quote_with_pipe_character(self):\n with self.assertRaisesRegexp(Exception, \"the quote included an embedded pipe character (|)\"):\n api.parse_quote(\" Quote with | character - Author\", simple_format=True)",
"def is_pipe_success(self, pipe_res):\n return reduce(\n lambda x, y: bool(x) & bool(y),\n pipe_res)",
"def pipe(required=True, mode='r'):\n def validate(ctx, param, value):\n if value is not None:\n return click.open_file(value, mode=mode, lazy=True), value\n\n get_stream = click.get_binary_stream if 'b' in mode else click.get_text_stream\n\n if 'r' not in mode:\n return get_stream('stdout'), None\n\n stream = get_stream('stdin')\n\n if not stream.isatty():\n return stream, None\n\n if required:\n raise click.MissingParameter(ctx=ctx, param=param)\n\n return None, None\n return validate",
"def more_data(pipe_out):\n r, _, _ = select.select([pipe_out], [], [], 0)\n return bool(r)",
"def test_pip_show_nodata(self):\n self.assertEqual(jc.parsers.pip_show.parse('', quiet=True), [])",
"def testNumberPipeOneLine(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4 |')\n self.assertAlmostEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def pipe(*args, **kwargs):\n return parser(*args, **kwargs)",
"def is_raw_read(command): \n if command.startswith('<READ') and command.endswith('>') and \\\n is_valid_raw(command):\n return True\n else: \n return False\n # end if",
"def validate(self) -> None:\n super().validate()\n if self.pipe_mode.value is SocketMode.CONNECT and self.pipe_format.value is None:\n raise Error(\"'pipe_format' required for CONNECT pipe mode.\")",
"def is_using_stdin(paths: list[str]) -> bool:\n return \"-\" in paths",
"def validate_input(pipe, input, transform):\n valid = True\n\n if not transform:\n if isinstance(input, list):\n for doc in input:\n if not isinstance(doc, Document):\n valid = False\n break\n else:\n if not isinstance(input, Document):\n valid = False\n\n if not valid:\n raise InputError(pipe)\n \n return valid",
"def read(pipe, line):\n\n c = pipe.read(1)\n if c != \"\":\n o = c.decode('utf-8')\n if o != '\\n':\n line += o\n return line, False\n else:\n return line, True\n else:\n return line, False",
"def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))",
"def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))",
"def postparsing_postcmd(self, stop):\n if not sys.platform.startswith('win'):\n # Fix those annoying problems that occur with terminal programs like \"less\" when you pipe to them\n if self.stdin.isatty():\n proc = subprocess.Popen(shlex.split('stty sane'))\n proc.communicate()\n return stop",
"def testNumberPipeTwoLines(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertFalse(pl.inPipeline)\n self.assertEqual(4, pl.stdin)\n repl.runCommandLine('')\n self.assertEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def validate_Exec_Shell(result, _dummy_command, _dummy_regex=None):\n return result is not None",
"def __can_read_command_line(self, pid):\n return os.path.isfile('/proc/%d/cmdline' % pid)",
"def test_probabilistic_parsers():",
"def is_terminal(p):\n return isinstance(p, _TerminalPacket)",
"def popen(self, *args, **kwargs):\n if args == (['grep', 'foo'],):\n return\n raise ValueError(self, args, kwargs)",
"def getflag(self, flag):\n\t\treturn (pservlet.pipe_get_flags(self._pipe_desc) & flag) != 0",
"def validate_stdin(context, param, value):\n # check if input is a file or stdin\n if value.name == '<stdin>':\n # raise error if stdin is empty\n if sys.stdin.isatty():\n raise click.BadParameter('you need to pipe something to stdin')\n\n return value",
"def is_pipeline(self):\n try:\n self.pipeline\n except ObjectDoesNotExist:\n return False\n return True",
"def check_pipe_setup(pipe=None, sequence=False, j=False):\n\n # The data pipe.\n if pipe == None:\n pipe = pipes.cdp_name()\n\n # Get the data pipe.\n dp = pipes.get_pipe(pipe)\n\n # Test if the current data pipe exists.\n check_pipe(pipe)\n\n # Test if sequence data exists.\n if sequence and not exists_mol_res_spin_data(pipe):\n raise RelaxNoSequenceError(pipe)\n\n # Test if J coupling data exists.\n if j:\n # Search for interatomic data.\n data = False\n for interatom in interatomic_loop():\n if hasattr(interatom, 'j_coupling'):\n data = True\n break\n\n # No data.\n if not data:\n raise RelaxNoJError()",
"def valid_syntax(command):\n\n for ev, value in bash_iter(command, syntax_check=True):\n if ev == \"err\":\n if value.endswith(\"syntax error: unexpected end of file\"):\n return False\n if \"unexpected EOF while looking for matching\" in value:\n return False\n if \"here-document at line\" in value:\n return False\n return value == 0",
"def check_streaming(self, arg: str):\n if not arg:\n return False\n elif arg.startswith(\"sdo:\"):\n print(\"[check_streaming] File is for streaming\")\n tmp_list = arg.splitlines()\n tmp_list.pop(0)\n for x in tmp_list:\n if self.check_name(x) is False:\n return False\n return True\n else:\n return False",
"def testPipeFound(self):\n safeFoundHelper(self)\n self.assertCurrentState(safe.Seeking)"
]
| [
"0.68179655",
"0.6040547",
"0.6036297",
"0.5914417",
"0.5893625",
"0.578709",
"0.557524",
"0.5528742",
"0.55239904",
"0.5494209",
"0.5474658",
"0.53684044",
"0.53115535",
"0.5283069",
"0.52190304",
"0.52190304",
"0.51897234",
"0.5186336",
"0.514774",
"0.5147619",
"0.51244754",
"0.5118334",
"0.50937027",
"0.5072641",
"0.5068751",
"0.5053408",
"0.503025",
"0.5013484",
"0.5010886",
"0.49964947"
]
| 0.7330331 | 0 |
Test whether the parser can parse several pipes | def test_pipe2():
parser = CmdParser([posandtwo, valprog])
out = parser.parse("posandtwo | valprog | posandtwo")
assert isinstance(out[0], ProgramNode)
assert isinstance(out[1], PipeNode)
assert isinstance(out[2], ProgramNode)
assert isinstance(out[3], PipeNode)
assert isinstance(out[4], ProgramNode)
assert isinstance(out[5], EndOfCommandNode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_pipe():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog\")\n assert isinstance(out[0], ProgramNode)\n assert out[0].program_desc == posandtwo\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert out[2].program_desc == valprog\n assert isinstance(out[3], EndOfCommandNode)",
"def more_data(pipe_out):\n r, _, _ = select.select([pipe_out], [], [], 0)\n return bool(r)",
"def handle_commands(packets, arguments):\r\n # if limit number is -1 meaning no limit\r\n limit_number = -1\r\n matched_packets = []\r\n\r\n # handle only -c flag occurs\r\n if len(arguments) == 2 and arguments[0] == \"-c\" and isinstance(int(arguments[1]), int):\r\n limit_number = int(arguments[1])\r\n for pck in packets:\r\n while limit_number > 0:\r\n output_generator(pck)\r\n limit_number -= 1\r\n return\r\n\r\n # while loop until the whole arguments is popped\r\n arg = arguments.popleft()\r\n while arg:\r\n # handle and\r\n if arg == \"and\":\r\n if len(arguments) == 0:\r\n print(\"Commands should be followed by \\\"and\\\" operator.\")\r\n sys.exit()\r\n else:\r\n # commands after and\r\n arg = arguments.popleft()\r\n new_matched, arg = handle_filter(packets, arg, arguments)\r\n\r\n matched_packets = [x for x in matched_packets if x in new_matched]\r\n\r\n # handle or\r\n elif arg == \"or\":\r\n if len(arguments) == 0:\r\n print(\"Commands should be followed by \\\"or\\\" operators.\")\r\n sys.exit()\r\n else:\r\n arg = arguments.popleft()\r\n\r\n new_matched, arg = handle_filter(packets, arg, arguments)\r\n\r\n matched_packets = [x for x in matched_packets if x not in new_matched] +\\\r\n [x for x in matched_packets if x in new_matched] +\\\r\n [x for x in new_matched if x not in matched_packets]\r\n # handle -c limit flag\r\n elif arg == \"-c\":\r\n if len(arguments) == 0:\r\n print(\"\\\"-c\\\" flag should be f commands.\")\r\n sys.exit()\r\n else:\r\n arg = arguments.popleft()\r\n limit_number = int(arg)\r\n # handle other commands\r\n else:\r\n matched_packets, arg = handle_filter(packets, arg, arguments)\r\n\r\n if len(arguments) != 0:\r\n arg = arguments.popleft()\r\n else:\r\n # break if arguments is empty\r\n break\r\n for pkt in matched_packets:\r\n # pass the matched packet to the output generator\r\n if limit_number == -1:\r\n output_generator(pkt)\r\n elif limit_number > 0:\r\n while limit_number > 0:\r\n output_generator(pkt)\r\n limit_number -= 1",
"def is_pipe_success(self, pipe_res):\n return reduce(\n lambda x, y: bool(x) & bool(y),\n pipe_res)",
"def eof(self):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = pservlet.pipe_eof(self._pipe_desc)\n\t\tif result > 0: return True\n\t\telif result == 0: return False\n\t\traise PlumberExceptions.PlumberNativeException(\"Cannot finish the API call to pipe_eof\")",
"def any(self, fifo: int, /) -> bool:",
"def testNumberPipeOneLine(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4 |')\n self.assertAlmostEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def pipe(*args, **kwargs):\n return parser(*args, **kwargs)",
"def validate_input(pipe, input, transform):\n valid = True\n\n if not transform:\n if isinstance(input, list):\n for doc in input:\n if not isinstance(doc, Document):\n valid = False\n break\n else:\n if not isinstance(input, Document):\n valid = False\n\n if not valid:\n raise InputError(pipe)\n \n return valid",
"def test_probabilistic_parsers():",
"def is_using_stdin(paths: list[str]) -> bool:\n return \"-\" in paths",
"def collision_pipes(self, pipes_list: list):\n result = False\n for pipe in pipes_list:\n if self.x_pos + self.width > pipe.x_pos and self.x_pos < pipe.x_pos + pipe.width:\n if self.y_pos < pipe.y_pos_up + pipe.height: # collide with top\n result = True\n break\n elif self.y_pos + self.height > pipe.y_pos_down: # collide with bottom\n result = True\n break\n return result",
"def run(self):\n assert len(self.elements) >= 2, \"In order flow, pipe needs 2 or more elements\"\n in_pipe = self.elements[0]\n other_pipes = self.elements[1:-1]\n out_pipe = self.elements[-1]\n\n self.make_assertions(in_pipe, other_pipes, out_pipe)\n\n for data in in_pipe.grasp():\n write = True\n\n for element in other_pipes:\n if isinstance(element, elements.DataPypElement):\n data = element.extend(data)\n elif isinstance(element, elements.FilterPypElement):\n if not element.stay(data):\n write = False\n break\n if write:\n out_pipe.extract(data)",
"def test_sqpp_long_or_chain(self):\n self.assertEqual(self.parser.parse_query('p0 or p1 or p2 or p3 or p4'),\n ['+', 'p0', '|', 'p1', '|', 'p2', '|', 'p3', '|', 'p4'])",
"def testNumberPipeTwoLines(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertFalse(pl.inPipeline)\n self.assertEqual(4, pl.stdin)\n repl.runCommandLine('')\n self.assertEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)",
"def _canProcessTags(self, grammar, pos_tags):\n badTags = []\n for tag in pos_tags:\n if tag not in grammar.tags:\n badTags.append(tag)\n logger.debug(\"Grammar can't handle tag:\" + tag)\n if badTags:\n return False\n else:\n return True",
"def pipes(self): \n return self._link_reg.pipes",
"def num_pipes(self):\n return len(self._link_reg.pipe_names)",
"def is_multi_commands(args: list) -> bool:\n for arg in args:\n if not isinstance(arg, list):\n return False\n # all elements must be lists\n return True",
"def test_multiple_parsers():\n rules = []\n first_parser = BlockParser(rules)\n assert len(first_parser.rules) == 0\n\n rules.append((lambda x: True, 1.0))\n second_parser = BlockParser(rules)\n assert len(second_parser.rules) == 1\n\n assert len(first_parser.rules) == 0, \"Non-local mutation of a parser's rules\"",
"def check_streaming(self, arg: str):\n if not arg:\n return False\n elif arg.startswith(\"sdo:\"):\n print(\"[check_streaming] File is for streaming\")\n tmp_list = arg.splitlines()\n tmp_list.pop(0)\n for x in tmp_list:\n if self.check_name(x) is False:\n return False\n return True\n else:\n return False",
"def pipe(required=True, mode='r'):\n def validate(ctx, param, value):\n if value is not None:\n return click.open_file(value, mode=mode, lazy=True), value\n\n get_stream = click.get_binary_stream if 'b' in mode else click.get_text_stream\n\n if 'r' not in mode:\n return get_stream('stdout'), None\n\n stream = get_stream('stdin')\n\n if not stream.isatty():\n return stream, None\n\n if required:\n raise click.MissingParameter(ctx=ctx, param=param)\n\n return None, None\n return validate",
"def test_parse_simple_quote_with_pipe_character(self):\n with self.assertRaisesRegexp(Exception, \"the quote included an embedded pipe character (|)\"):\n api.parse_quote(\" Quote with | character - Author\", simple_format=True)",
"def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))",
"def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))",
"def has_more_commands(self):\n return not self.eof",
"def make_assertions(input_pipe, other_pipes, output_pipe):\n assert isinstance(input_pipe, elements.InPypElement), 'Wrong input element type, want a InPypElement!'\n assert isinstance(output_pipe, elements.OutPypElement), 'Wrong output element type, want a OutPypElement!'\n for other_pipe in other_pipes:\n assert isinstance(other_pipe, elements.MidPypElement), 'Wrong middle element type, want a MidPypElement!'",
"def test_basic_parsers():",
"def done_parsing(self):\n # STUDENT\n return (self.input_buffer_len() == 1 ) and (self.stack_len()==1) \n # END STUDENT",
"def test_multiple_identical_series(self):\n assert parse_command('test{{A,B}}{{A,B}}') == [\n ('testAA', {}), ('testBB', {})]"
]
| [
"0.701313",
"0.59645516",
"0.56327486",
"0.55999887",
"0.55907416",
"0.53708684",
"0.53358024",
"0.5299428",
"0.5264803",
"0.5258192",
"0.52323353",
"0.521374",
"0.51996046",
"0.51893926",
"0.518938",
"0.5164227",
"0.51499546",
"0.51329315",
"0.51180947",
"0.5110935",
"0.50976235",
"0.5097358",
"0.5082109",
"0.50492424",
"0.50492424",
"0.50434124",
"0.5039468",
"0.50204",
"0.5005018",
"0.49935812"
]
| 0.6755077 | 1 |
A test to see if the parser can handle findlike's single-dash long args | def test_findlike():
parser = CmdParser([findlike])
out = parser.parse("findlike . -name foo")
assert out[0].arguments[0].present == True
assert out[0].arguments[0].value == "foo"
assert out[0].arguments[1].present == True
assert out[0].arguments[1].value == "."
assert out[0].as_shell_string() == "findlike . -name foo" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_arg_option_long_only(self):\n optional_long = [\n arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith(\"-\")\n ]\n for arg in optional_long:\n assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f\"{arg.flags[0]} is not match\"",
"def _check_valid_command_argument(valid_list, args):\n if args in valid_list:\n return 0\n else:\n return -1",
"def test_command_only_long_option(self):\n pattern_1 = re.compile(\"\\\"-[a-zA-Z]\\\"\")\n pattern_2 = re.compile(\"'-[a-zA-Z]'\")\n current_dir = os.path.dirname(os.path.abspath(__file__))\n ignore = [\"__init__.py\", \"__pycache__\", os.path.basename(__file__)]\n for test_file in os.listdir(current_dir):\n if test_file in ignore:\n continue\n match = []\n with open(os.path.join(current_dir, test_file), \"r\") as f:\n content = f.read()\n match.extend(pattern_1.findall(content))\n match.extend(pattern_2.findall(content))\n self.assertListEqual(\n [],\n match,\n \"Should use long option in test for more clearer intent, \"\n f\"but get {match} in {test_file}\"\n )",
"def check_args(args, iam='gfind', allow_no_coords=False):\n\n args = gargs.check_common_args(args, iam, allow_no_coords=allow_no_coords)\n\n return args",
"def test_arg_option_mix_short_long(self):\n optional_mix = [\n arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith(\"-\")\n ]\n for arg in optional_mix:\n assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f\"{arg.flags[0]} is not match\"\n assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f\"{arg.flags[1]} is not match\"",
"def _validate_show_command(args):\n return _check_entry_name(args)",
"def cmd_has_option(self, executable, search_option, arg=None):\n if not executable:\n return False\n arg_list = []\n if arg and is_genstr(arg):\n arg_list = [arg]\n elif isinstance(arg, list):\n arg_list = arg\n out = Uprocess().get_output([executable] + arg_list + [\"--help\"])\n if out and search_option in re.split(r\"[=|\\*\\[\\]\\n,; ]+\", out):\n return True\n return False",
"def test_cmdlineproc_test2():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"-about\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == [\"-about\"]",
"def test_subcommand_arg_flag_conflict(self):\n subcommand = {\n key: val\n for key, val in cli_parser.__dict__.items()\n if key.isupper() and key.startswith(\"COMMANDS\")\n }\n for group, command in subcommand.items():\n for com in command:\n position = [\n a.flags[0] for a in com.args if (len(a.flags) == 1 and not a.flags[0].startswith(\"-\"))\n ]\n conflict_position = [arg for arg, count in Counter(position).items() if count > 1]\n assert [] == conflict_position, (\n f\"Command group {group} function {com.name} have conflict \"\n f\"position flags {conflict_position}\"\n )\n\n long_option = [\n a.flags[0] for a in com.args if (len(a.flags) == 1 and a.flags[0].startswith(\"-\"))\n ] + [a.flags[1] for a in com.args if len(a.flags) == 2]\n conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]\n assert [] == conflict_long_option, (\n f\"Command group {group} function {com.name} have conflict \"\n f\"long option flags {conflict_long_option}\"\n )\n\n short_option = [a.flags[0] for a in com.args if len(a.flags) == 2]\n conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]\n assert [] == conflict_short_option, (\n f\"Command group {group} function {com.name} have conflict \"\n f\"short option flags {conflict_short_option}\"\n )",
"def test_cmdlineproc_test3():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"--about\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == [\"--about\"]",
"def issafe(arg):\n return arg.find(';') == -1 and arg.find('|') == -1",
"def process_arg(arg):\n return False",
"def _check_args(self, args_):\n\n pass",
"def valid_args(args):\n return args is not None and len(args) > 0",
"def is_valid_command(self, string):\n return string[:3] == \"--!\"",
"def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False",
"def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1",
"def validate_args(args):\n command = args[0]\n args_length = len(args) - 1\n return VALID_COMMANDS[command] == args_length",
"def check_backtester_args(parser: ArgumentParser, args: Namespace) -> None:\n check_ignoreodds_arg(parser, args)",
"def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"segment_fn\", type=str, help=\"pickled segmentation file\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()",
"def test_duplicate_flags():\n parser = CmdParser([noArgs, onearg])\n with pytest.raises(CmdParseError):\n out = parser.parse(\"onearg -a -a\")",
"def validate_args() -> bool:\n if len(argv) == 1 or \\\n '--help' in argv:\n print(usage)\n return False\n return True",
"def test_at_most_no_count_no_default_args_optional():\n class TestCmdLine(CmdLine):\n yaml_def = '''\n supported_options:\n - category:\n options:\n - name : test_opt\n long : test-opt\n opt : param\n multi_type: at-most\n required : false\n '''\n test_opt = None\n args = \"util-name --test-opt cmdline1 cmdline2\"\n parse_result = TestCmdLine.parse(args)\n assert parse_result.value == ParseResultEnum.PARSE_ERROR.value\n assert \"Arg parse error at\" in TestCmdLine.parse_errors[0]",
"def test_at_most_no_count_no_default_args_required():\n class TestCmdLine(CmdLine):\n yaml_def = '''\n supported_options:\n - category:\n options:\n - name : test_opt\n long : test-opt\n opt : param\n multi_type: at-most\n required : true\n '''\n test_opt = None\n args = \"util-name --test-opt cmdline1 cmdline2\"\n parse_result = TestCmdLine.parse(args)\n assert parse_result.value == ParseResultEnum.PARSE_ERROR.value\n assert \"Arg parse error at\" in TestCmdLine.parse_errors[0]",
"def _is_help(argv):\n if len(argv) == 0:\n return True\n return _HELP_RE.search(argv[0])",
"def checkArguments ( ) :\r\n\r\n if len( sys.argv ) <= 1 : return None\r\n\r\n\r\n # splits the arguments that contain quotes\r\n \r\n wordList = [ ]\r\n\r\n for argument in sys.argv :\r\n\r\n wordList.extend( argument.split( '\"' ) )\r\n\r\n\r\n # places all the arguments that start with \"--\" at the end, and joins the others into words\r\n\r\n noMinusList = [ ]\r\n\r\n minusList = [ ]\r\n\r\n argument = \"\"\r\n\r\n for word in wordList[ 1 : ] :\r\n\r\n # strips spaces and quotes\r\n \r\n word = word.strip( \" \\\"'\" ) \r\n\r\n if word.startswith( \"--\" ) :\r\n\r\n minusList.append( word )\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n argument = \"\"\r\n\r\n elif argument == \"\" :\r\n\r\n argument = word\r\n\r\n else :\r\n\r\n argument = argument + \" \" + word\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n\r\n # library = 1st argument of the form \"-- ... /\" that exists\r\n\r\n libraryPath = None\r\n\r\n for argument in minusList :\r\n\r\n if ( ( argument.endswith( os.sep ) ) and ( os.path.exists( argument.strip( \"- \" ) ) ) ) :\r\n\r\n libraryPath = argument.strip( \"-\" )\r\n\r\n break\r\n\r\n # recomposes the command line\r\n \r\n sys.argv = wordList[ : 1 ] + noMinusList + minusList \r\n\r\n return libraryPath",
"def CommandArgs(args):\n if len(args) > 1:\n if args[1].startswith('--'):\n option = args[1] [2:]\n if len(args) > 2:\n content = args[2]\n return option, content\n return True, None\n return False, None",
"def is_allowed_on_small_struni(*args):\n return _ida_hexrays.is_allowed_on_small_struni(*args)",
"def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False",
"def is_valid_command(args):\n if args.command is not None:\n return True\n return False"
]
| [
"0.6788855",
"0.6294759",
"0.6258076",
"0.61646134",
"0.61308974",
"0.6072468",
"0.5958455",
"0.58832896",
"0.58533525",
"0.5805959",
"0.5746239",
"0.5735969",
"0.57151854",
"0.5693454",
"0.56681514",
"0.56567216",
"0.5641007",
"0.56351584",
"0.5633745",
"0.5623828",
"0.56227994",
"0.56122214",
"0.5565506",
"0.5546281",
"0.55258346",
"0.55126107",
"0.5496702",
"0.5486085",
"0.5459815",
"0.5449736"
]
| 0.64588284 | 1 |
Don't expect duplicate flags unless told. | {
parser = CmdParser([noArgs, onearg])
with pytest.raises(CmdParseError):
out = parser.parse("onearg -a -a") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_duplicate(self, state):\n pass",
"def SynchronizeFlags(self):\n pass",
"def test_checkFlags(self):\n self.failUnlessEqual(self.nice.opts['aflag'], 1)\n self.failUnlessEqual(self.nice.opts['flout'], 0)",
"def resetFlags():\r\n for flag in flags:\r\n flags[flag] = False",
"def flag():\n pass",
"def test_addFlagsSilently(self):\n self._flagsSilentlyTest('addFlags', b'+FLAGS.SILENT')",
"def checkallflags(flags_with_values,flags_withoutvalues,cldic):\r\n if len(set(flags_with_values).intersection(set(flags_without_values))) > 0:\r\n print ( \"error some flags appear in two lists of flags, with and without required values:\",set(flags_with_values).intersection(set(flags_without_values)))\r\n printcommandset()\r\n sys.exit(1)\r\n for flag in set(flags_with_values).union(set(flags_withoutvalues)):\r\n if flag not in cldic:\r\n print ( \"error some flag mismatch between strings of flags and dictionary of flags:\",flag)\r\n printcommandset()\r\n sys.exit(1)\r\n return",
"def flagSet():\r\n for flag in flags:\r\n if flags[flag]:\r\n return True\r\n return False",
"def test_subcommand_arg_flag_conflict(self):\n subcommand = {\n key: val\n for key, val in cli_parser.__dict__.items()\n if key.isupper() and key.startswith(\"COMMANDS\")\n }\n for group, command in subcommand.items():\n for com in command:\n position = [\n a.flags[0] for a in com.args if (len(a.flags) == 1 and not a.flags[0].startswith(\"-\"))\n ]\n conflict_position = [arg for arg, count in Counter(position).items() if count > 1]\n assert [] == conflict_position, (\n f\"Command group {group} function {com.name} have conflict \"\n f\"position flags {conflict_position}\"\n )\n\n long_option = [\n a.flags[0] for a in com.args if (len(a.flags) == 1 and a.flags[0].startswith(\"-\"))\n ] + [a.flags[1] for a in com.args if len(a.flags) == 2]\n conflict_long_option = [arg for arg, count in Counter(long_option).items() if count > 1]\n assert [] == conflict_long_option, (\n f\"Command group {group} function {com.name} have conflict \"\n f\"long option flags {conflict_long_option}\"\n )\n\n short_option = [a.flags[0] for a in com.args if len(a.flags) == 2]\n conflict_short_option = [arg for arg, count in Counter(short_option).items() if count > 1]\n assert [] == conflict_short_option, (\n f\"Command group {group} function {com.name} have conflict \"\n f\"short option flags {conflict_short_option}\"\n )",
"def test_addFlagsSilentlyWithUnsolicitedData(self):\n self._flagsSilentlyWithUnsolicitedDataTest('addFlags', b'+FLAGS.SILENT')",
"def test_arg_option_mix_short_long(self):\n optional_mix = [\n arg for arg in cli_args.values() if len(arg.flags) == 2 and arg.flags[0].startswith(\"-\")\n ]\n for arg in optional_mix:\n assert LEGAL_SHORT_OPTION_PATTERN.match(arg.flags[0]) is not None, f\"{arg.flags[0]} is not match\"\n assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[1]) is None, f\"{arg.flags[1]} is not match\"",
"def test_ignore_dups(self):\n class Test(pyperry.Base): pass\n Test.attributes('id', 'poop', 'poop')\n\n self.assertEqual(Test.defined_attributes, set(['id', 'poop']))",
"def take_action_on_flags(self, *args, **kwargs):\r\n pass",
"def test_identical(self):\n write this test!",
"def override_if_not_in_args(flag, argument, args):\r\n if flag not in args:\r\n args.extend([flag, argument])",
"def test_flags(self):\n self.check_search(\n dict(flag_contact=u'yes'),\n [u'Tackle', u'DoubleSlap', u'Ice Punch', u'Bite', u'Fly'],\n 'flimsy search by flag',\n )\n\n self.check_search(\n dict(flag_mirror=u'no'),\n [u'Counter', u'Curse', u'Focus Punch', u'Sunny Day'],\n 'better search by flag',\n )\n\n self.check_search(\n dict(flag_contact=u'no', name=u'punch'),\n [],\n 'searching by nega-flag',\n exact=True,\n )",
"def dupable_matches_required(self):\n return 2",
"def flags(self) -> UserFlag:",
"def test_handle_flag_error(self):\n pass",
"def is_known(combo):\n return not _NONE_NONE & combo",
"def verify_common(self, tool_name, tool_instance):\n pos_args, kw_args = tool_instance.tool.call_args\n self.assertEqual(pos_args[0], (self.BIN_DIR, tool_name))\n self.assertEqual(kw_args[\"flags\"], self.FLAGS)\n\n # Existing options were retained\n for flag, expected in self.OPTIONS.items():\n self.assertEqual(kw_args[\"options\"][flag], expected)",
"def testduplicate(self):\n a = AttributeAbility(['ST',], 3)\n self.assertTrue(a.duplicate(a))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 3)))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 5)))\n self.assertFalse(a.duplicate(AttributeAbility(['DX',], 5)))",
"def test_reuse_options(self):\n\n self.git.commit(\n message='new: XXX commit',\n author='Bob <[email protected]>',\n date='2000-01-01 10:00:00',\n allow_empty=True)\n self.git.commit(\n message='new: XYZ commit',\n author='Bob <[email protected]>',\n date='2000-01-01 10:00:00',\n allow_empty=True)\n self.git.commit(\n message='new: normal commit !minor',\n author='Bob <[email protected]>',\n date='2000-01-01 10:00:00',\n allow_empty=True)\n\n gitchangelog.file_put_contents(\n \".gitchangelog.rc\",\n \"ignore_regexps += [r'XXX', ]\")\n\n changelog = w('$tprog')\n self.assertNotContains(\n changelog, \"XXX\",\n msg=\"Should not contain commit with XXX in it... \"\n \"content of changelog:\\n%s\" % changelog)\n self.assertContains(\n changelog, \"XYZ\",\n msg=\"Should contain commit with XYZ in it... \"\n \"content of changelog:\\n%s\" % changelog)\n self.assertNotContains(\n changelog, \"!minor\",\n msg=\"Shouldn't contain !minor tagged commit neither... \"\n \"content of changelog:\\n%s\" % changelog)",
"def no_abab():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"abab\", prompt=False\n ).stdout(\"NO\", regex=False\n ).exit()",
"def check_potentially_fake(self):\n # Check if the second group of numbers is different than 0000\n if(self.code[5:9] != \"0000\"):\n self.filters |= Filters.PotentiallyFake",
"def TransformFlags(self) -> _n_2_t_0[bool]:",
"def testduplicate(self):\n self.assertTrue(AmuletAbility('Control Dragon').duplicate(\n AmuletAbility('Control Dragon')))\n self.assertFalse(AmuletAbility('Control Dragon').duplicate(\n AmuletAbility('Control NPC')))\n self.assertTrue(AmuletAbility('Proof', element='Fire').duplicate(\n AmuletAbility('Proof', element='Fire')))\n self.assertFalse(AmuletAbility('Proof', element='Fire').duplicate(\n AmuletAbility('Proof', element='Water')))\n self.assertTrue(AmuletAbility('Attribute', attr='ST').duplicate(\n AmuletAbility('Attribute', attr='ST')))\n self.assertFalse(AmuletAbility('Attribute', attr='ST').duplicate(\n AmuletAbility('Attribute', attr='DX')))\n self.assertTrue(AmuletAbility('Skepticism', size=3).duplicate(\n AmuletAbility('Skepticism', size=3)))\n self.assertTrue(AmuletAbility('Skepticism', size=3).duplicate(\n AmuletAbility('Skepticism', size=5)))",
"def test_duplicate_ids2():\n assert query_row(db_conf, 'osm_buildings', 51001)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51001) == None\n assert query_row(db_conf, 'osm_buildings', -51011)['type'] == 'mp'\n assert query_row(db_conf, 'osm_buildings', 51011) == None",
"def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)",
"def check_unique(self):\n pass"
]
| [
"0.61147404",
"0.60242957",
"0.59610164",
"0.5883106",
"0.5855223",
"0.5831722",
"0.57295144",
"0.56410885",
"0.5606265",
"0.5586294",
"0.5546479",
"0.54805815",
"0.5478503",
"0.5455299",
"0.5423713",
"0.5400602",
"0.539708",
"0.53953934",
"0.53726554",
"0.53534085",
"0.5338658",
"0.53319275",
"0.531606",
"0.5288059",
"0.52454627",
"0.5242638",
"0.52340406",
"0.5233316",
"0.5227785",
"0.5214666"
]
| 0.66306114 | 0 |
stop the execution and print out the captured error message. | def stop_err(msg):
sys.stderr.write('%s\n' % msg)
sys.exit(-1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop(self):\n if self.debug:\n print(\"%s stop\" % self.name)\n self.force_exit()",
"def _stop(self):\n self.display_end_message()",
"def stop() -> None:",
"def stop_err(msg, error_level=1):\n sys.stderr.write(\"%s\\n\" % msg)\n sys.exit(error_level)",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop_execution(self):\n self.send_message(\"control.stop\",None)",
"def stop(self) -> None:\n ...",
"def finalize_error():\n print('')\n exit(-1)",
"def exit(self):\n if self.debug:\n print(\"%s exit\" % self.name)\n self.stop()",
"def stop_and_outputlogMessage(message):\n assert False\n outputlogMessage(message)",
"def stop (self):\n pass",
"def stop (self):\n pass",
"def stop(self):\n self.exit.set()",
"def stop(self):\n self.exit.set()",
"def stop_procedure(self):\n pass",
"def __exit__(self, type=None, value=None, traceback=None):\n self.stop()",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass"
]
| [
"0.7179926",
"0.6989759",
"0.69344425",
"0.68685615",
"0.68195295",
"0.68195295",
"0.6798366",
"0.6798366",
"0.6798366",
"0.6798366",
"0.67834127",
"0.6706759",
"0.6693671",
"0.66405547",
"0.66277504",
"0.6595876",
"0.6595876",
"0.6584673",
"0.6584673",
"0.6577877",
"0.6571223",
"0.6569489",
"0.6569489",
"0.6569489",
"0.6569489",
"0.6569489",
"0.6569489",
"0.6569489",
"0.6569489",
"0.6569489"
]
| 0.7336493 | 0 |
Returns a list of iteration steps corresponding to multiples of the stepsize time. | def get_steps_by_regular_time_interval(times, stepsize, max_time=None):
if max_time is None:
max_time = times[-1]
chosen_times = arange(stepsize,max_time,stepsize)
return get_steps_by_times(times,chosen_times) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _getsteps(num_of_steps, limit):\n steps = []\n current = 0.0\n for i in range(0, num_of_steps):\n if i == num_of_steps - 1:\n steps.append(int(round(limit)))\n else:\n steps.append(int(round(current)))\n current += float(limit) / float(num_of_steps - 1)\n return steps",
"def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]",
"def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]",
"def getSteps():",
"def _TIME2STEPS(time):\n return int(time*1000)",
"def iterate_list_specific_step_size(list, step_size):\n for i in range(0, len(list), step_size):\n yield list[i:i + step_size]\n return",
"def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)",
"def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps",
"def time_step(self, dt, Nsteps=1):\n assert Nsteps >= 0\n self.dt = dt\n if Nsteps > 0:\n self.psi_mod_x *= self.x_evolve_half\n for num_iter in xrange(Nsteps - 1):\n self.compute_k_from_x()",
"def total_steps(self) -> global___Expression:",
"def get_steps(ts, dt=None, time=None):\n if time is not None:\n step, _ = get_step_and_info(ts, time)\n steps = [step]\n elif dt is not None and dt > 0.:\n steps = []\n time_max = ts.times[-1]\n time = ts.times[0]\n while time <= time_max:\n step, _ = get_step_and_info(ts, time)\n steps.append(step)\n time += dt\n else:\n steps = range(len(ts))\n return steps",
"def n_timesteps(self) -> int:\n return len(self.time)",
"def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)",
"def make_times_with_n_step(self, start, end, n):\n self.target_times = []\n step = start\n delta = old_div((end - start), float(n))\n while step <= end:\n self.target_times.append(step)\n step += delta",
"def time_grid(time_step=30):\n if time_step < 1 or time_step > 60:\n raise ValueError('Time resolution should be between 0 and 60 [s]')\n half_step = time_step/SECONDS_PER_HOUR/2\n return np.arange(half_step, 24+half_step, half_step*2)",
"def xSteps(self,start,ziel,steps=10):\n erg=[]\n wert=(ziel-start)/(steps)\n for i in range(1, steps+1):\n erg.append(round(start+wert*i,2))\n return erg",
"def batch_steps(num_examples, batch_size):\n steps = num_examples // batch_size\n if num_examples % batch_size > 0:\n steps += 1\n return steps",
"def convert_to_fixed_timesteps(trajectories, timestep_size=60):\n\n def time_to_timestep(row):\n h, m, s = map(int, row[\"Time\"].split(':'))\n timestep = round((h*3600+m*60+s)/timestep_size)\n return timestep\n\n for i, t in trajectories.items():\n t[\"Timestep\"] = t.apply(time_to_timestep, axis=1)\n t = t.drop_duplicates(\"Timestep\")\n t = t.set_index(\"Timestep\")\n trajectories[i] = t\n \n return trajectories",
"def steps(self, length):\n steps = max(1, round(self.length / length, 0))\n return 1.0 / steps, int(steps)",
"def _calculate_step_sizes(x_size, y_size, num_chunks):\n # First we try to split only along fast x axis\n xstep = max(1, int(x_size / num_chunks))\n\n # More chunks are needed only if xstep gives us fewer chunks than\n # requested.\n x_chunks = int(x_size / xstep)\n\n if x_chunks >= num_chunks:\n ystep = y_size\n else:\n # The x and y loops are nested, so the number of chunks\n # is multiplicative, not additive. Calculate the number\n # of y chunks we need to get at num_chunks.\n y_chunks = int(num_chunks / x_chunks) + 1\n ystep = max(1, int(y_size / y_chunks))\n\n return xstep, ystep",
"def _iterate_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n return 2 ** self.Ns\n else:\n return scipy.special.comb(self.Ns, mixture_size, exact=True)",
"def n_steps(self, actions):\n return [self.step(action) for action in actions]",
"def _incremental_steps(start, end, steps, stepsize=None):\n if stepsize is None: step_size = (end - start) / np.maximum((steps - 1), 1)\n gradient = []\n for i in range(steps):\n value = start + step_size * i\n gradient.append(value)\n\n return gradient[0:steps]",
"def _STEPS2TIME(step):\n return step/1000.",
"def expansion_steps(self):\n return self._p",
"def get_steps(self):\n return self.steps",
"def _get_steps(self):\n return self.steps",
"def setStepSize(self, step_size):\n assert isinstance(step_size, int)\n self.step_size = step_size\n self.step_directions = [np.array([i[0], i[1]]) for i in [(0,0),\n (0,step_size),\n (0,-step_size),\n (step_size, 0),\n (-step_size,0)]]",
"def steps(self):\n for step in self._steps:\n yield step",
"def tsp_walk(n, op, nsteps):\n result = []\n t = list(range(n))\n result.append(tuple(t))\n for i in range(nsteps):\n t = op(t)\n result.append(tuple(t))\n return result"
]
| [
"0.6362053",
"0.6323178",
"0.6312536",
"0.6141533",
"0.60039514",
"0.6000389",
"0.5971617",
"0.5956176",
"0.5933951",
"0.589602",
"0.5892214",
"0.5855558",
"0.5854265",
"0.5840716",
"0.58393264",
"0.5838846",
"0.5836665",
"0.5818361",
"0.58091825",
"0.577143",
"0.57600915",
"0.5756506",
"0.5751556",
"0.57328296",
"0.5700731",
"0.5694354",
"0.5662326",
"0.56497616",
"0.56440395",
"0.5640982"
]
| 0.6738848 | 0 |
Returns a list of iteration steps (think indices) corresponding to the first time after each of the sorted eval_times. | def get_steps_by_times(times,chosen_times):
time_steps = []
chosen_index = 0
for i in range(len(times)):
if times[i] >= chosen_times[chosen_index]:
time_steps.append(i)
chosen_index += 1
if chosen_index == len(chosen_times): #We're done
return time_steps
if chosen_index != len(chosen_times): #In case the last chosen time(s) is past the end of times.
time_steps.append(i)
return time_steps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)",
"def get_timesteps(self, timesteps=None, every=None, samples=None):\n timedict = self.generate_timedict()\n\n if samples is not None:\n stride = len(timedict) // samples\n return [str(i) for i in range(0, len(timedict), stride)]\n elif every is not None:\n max_time_index = len(timedict) - 1\n indices = [str(i) for i in range(0, max_time_index, every)]\n\n # Inclusive interval: including the maximum time step index,\n # even when it does not fit with the stride\n if not str(max_time_index) in indices:\n indices.append(str(max_time_index))\n\n return indices\n elif timesteps is not None:\n idx = np.searchsorted(timedict, timesteps)\n return [str(i) for i in idx] if isinstance(timesteps, list) else [str(idx)]",
"def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)",
"def get_times(self):\n times = []\n for i in range(1, len(self.events)):\n times.append(self.events[i-1].elapsed_time(self.events[i]))\n return times",
"def times(self):\n return list(range(self._max_time + 1))",
"def _get_batches_starting_indexes(self):\n\n indexes = numpy.arange(0, self.num_frames, self.recurrence)\n indexes = numpy.random.permutation(indexes)\n\n # Shift starting indexes by self.recurrence//2 half the time\n if self.batch_num % 2 == 1:\n indexes = indexes[(indexes + self.recurrence) % self.num_frames_per_proc != 0]\n indexes += self.recurrence // 2\n self.batch_num += 1\n\n num_indexes = self.batch_size // self.recurrence\n batches_starting_indexes = [indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)]\n\n return batches_starting_indexes",
"def getSteps():",
"def schedule_to_timesteps(schedule):\n max_timestep = np.max(schedule.flatten())\n out = [[] for _ in range(max_timestep + 1)]\n (rows, cols, _) = schedule.shape\n\n for row in range(rows):\n for col in range(cols):\n for time_step in schedule[row][col]:\n out[time_step].append((row, col))\n\n return out",
"def playXSteps(self, solver, plays):\n res = []\n for play in plays:\n x = play[0]\n while self.lastEndStep < x:\n solver.solveOneStep()\n self.lastEndStep += 1\n res.append(solver.gm.getGameState())\n return res",
"def runs(self):\n cycles = []\n temp_cycle = []\n perm = self.array_form\n for i in xrange(len(perm) - 1):\n current_elem = perm[i]\n next_elem = perm[i+1]\n\n if current_elem < next_elem:\n temp_cycle.append(current_elem)\n continue\n\n if current_elem > next_elem:\n if temp_cycle != [] and \\\n temp_cycle[-1] < current_elem:\n temp_cycle.append(current_elem)\n cycles.append(temp_cycle)\n temp_cycle = []\n continue\n else:\n if temp_cycle != []:\n cycles.append(temp_cycle)\n cycles.append([current_elem])\n temp_cycle = []\n continue\n\n if current_elem < next_elem:\n temp_cycle.append(next_elem)\n cycles.append(temp_cycle)\n else:\n if temp_cycle != []:\n cycles.append(temp_cycle)\n cycles.append([next_elem])\n return cycles",
"def topairs(self):\n return list(zip(self._times, self._values))",
"def ensemble_times(self):\n return self['validtime'].values",
"def NextEventSortKey(self):\n return (self._NextEventStep(), self._NextEventWallTime())",
"def _timeasc_traversal(self, root):\n return map(int, self.timeasc(root))",
"def get_steps_by_regular_time_interval(times, stepsize, max_time=None):\n if max_time is None:\n max_time = times[-1]\n chosen_times = arange(stepsize,max_time,stepsize) \n return get_steps_by_times(times,chosen_times)",
"def get_steps(ts, dt=None, time=None):\n if time is not None:\n step, _ = get_step_and_info(ts, time)\n steps = [step]\n elif dt is not None and dt > 0.:\n steps = []\n time_max = ts.times[-1]\n time = ts.times[0]\n while time <= time_max:\n step, _ = get_step_and_info(ts, time)\n steps.append(step)\n time += dt\n else:\n steps = range(len(ts))\n return steps",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def tsp_walk(n, op, nsteps):\n result = []\n t = list(range(n))\n result.append(tuple(t))\n for i in range(nsteps):\n t = op(t)\n result.append(tuple(t))\n return result",
"def time_steps(self):\n\n import pandas as pd\n import numpy as np\n\n # get time step list and format as dataframe\n df = pd.DataFrame(self.doc.getTimeSteps())\n df.columns = [\"step_index\", \"simulation_time\", \"timestep_length\"]\n\n # add calendar column\n if self.doc.getReferenceTime() is not None:\n df[\"simulation_date\"] = [self.doc.getReferenceTime() + timedelta(days=simtime) for simtime in\n df.simulation_time]\n else:\n df[\"simulation_date\"] = np.nan\n\n # add file index\n df.index.name = \"file_index\"\n df.reset_index(inplace=True)\n\n # reorder columns\n df = df[['file_index', 'step_index', 'simulation_time', 'simulation_date', 'timestep_length']]\n\n return df.set_index(\"file_index\")",
"def calculate_intervals(tick_times: List[float]) -> List[float]:\n return [tick_times[i] - tick_times[i - 1] for i in range(1, len(tick_times))]",
"def eta(self):\n\t\tresult = []\n\t\ttime = 0\n\t\tposition = self\n\t\tfor delay, star, order, num_ships in self.orders:\n\t\t\ttime += int(math.ceil(star.distance(position) / self.galaxy.fleet_speed))\n\t\t\tposition = star\n\t\t\tresult.append(time)\n\t\t\ttime += delay\n\t\treturn result",
"def n_steps(self, actions):\n return [self.step(action) for action in actions]",
"def window_start_times(self):\n window_length = self.window_length\n if window_length is not None:\n return np.array(self.times) - window_length / 2",
"def run_idxs(self):\n return list(range(len(self._h5[RUNS])))",
"def get_timesteps(cur):\n info = cur.execute('SELECT initialyear, initialmonth, '\n 'duration FROM info').fetchone()\n init_year = info['initialyear']\n init_month = info['initialmonth']\n duration = info['duration']\n timestep = np.linspace(0, duration - 1, num=duration)\n\n return init_year, init_month, duration, timestep",
"def get_best_schedule(self):\n # load the model weights\n self.models = [load_model(f'dqn_{task_id}.h5')\n for task_id in range(len(self.models))]\n\n actions = []\n is_scheduled = [0] * len(self.models)\n\n while (not all(is_scheduled)):\n observation = OrderedDict([('is_scheduled', is_scheduled)])\n best_action = self._get_best_action(observation)\n actions.append(best_action)\n is_scheduled[best_action['task_id']] = best_action['start_time']\n\n return actions",
"def _timedesc_traversal(self, root):\n return map(int, self.timedesc(root))",
"def evaluate_multiple_time(self, time=200, save_dir='../multi_eval/INN/'):\r\n tk = time_keeper(os.path.join(save_dir, 'evaluation_time.txt'))\r\n save_dir += self.flags.data_set\r\n for i in range(time):\r\n self.evaluate(save_dir=save_dir, prefix='inference' + str(i))\r\n tk.record(i)"
]
| [
"0.58948535",
"0.58846205",
"0.5860858",
"0.5825945",
"0.57967544",
"0.57846475",
"0.57793707",
"0.57188964",
"0.56175333",
"0.5607721",
"0.5590442",
"0.55815035",
"0.55307555",
"0.5507173",
"0.5487642",
"0.5441355",
"0.5435563",
"0.5435563",
"0.5435563",
"0.5398938",
"0.5314357",
"0.5301815",
"0.5256945",
"0.5247805",
"0.52392596",
"0.51967275",
"0.51822084",
"0.5174969",
"0.51687914",
"0.5167184"
]
| 0.65579444 | 0 |
Compute the operation "option" on the given item of the two files | def compute_opt(filenames, item, option):
if len(filenames) == 2:
file1 = os.path.join(ROOT_DATA, filenames[0])
file2 = os.path.join(ROOT_DATA, filenames[1])
if not os.path.isfile(file1) or not os.path.isfile(file2):
print("One of the given files '%s' or '%s' does not exists" %
(file1, file2))
return None
root1 = ROOT.TFile.Open(file1, "READ")
root2 = ROOT.TFile.Open(file2, "READ")
if not root1 or not root2:
print("one of the fiven files '%s' or '%s' is not a root file" %
(file1, file2))
return None
result = []
# first add the two histograms
h1 = root1.Get(str(item))
if h1:
result.append({"root": filenames[0], "items": {
item: json.loads(str(ROOT.TBufferJSON.ConvertToJSON(h1)))}})
else:
print("ERROR item %s not found in file %s" % (item, file1))
return None
h2 = root2.Get(str(item))
if h2:
result.append({"root": filenames[1], "items": {
item: json.loads(str(ROOT.TBufferJSON.ConvertToJSON(h2)))}})
else:
print("ERROR item %s not found in file %s" % (item, file2))
return None
# compute what required
if option == "Kolmogorov":
result.append({"KSTest": h2.KolmogorovTest(h1)})
if option == "Difference":
h1.Add(h2, -1)
h1.SetName("Difference")
h1.SetOption("HIST")
h1.SetMinimum(-100)
elif option == "Ratio":
h1.Divide(h2)
h1.SetName("Ratio")
h1.SetOption("HIST")
if h1:
result.append(
{"computed_result": json.loads(str(ROOT.TBufferJSON.ConvertToJSON(h1)))})
return result
else:
print("ERROR. We can compute the %s only beetween two files (given %s)" % (
option, filenames))
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(file1, file2, uniq1=False, uniq2=False, union=False, tab=False, col1=1, col2=1):\n delimiter = \"\\t\" if tab else \",\"\n\n idx1 = col1 - 1\n idx2 = col2 - 1\n\n # Figure out what the mode of operation is.\n show = ISECT\n show = UNIQ1 if uniq1 else show\n show = UNIQ2 if uniq2 else show\n show = UNION if union else show\n\n if not os.path.isfile(file1):\n print(f\"file not found: {file1}\")\n sys.exit(1)\n\n # Get a stream for each file\n stream1 = get_stream(file1)\n stream2 = get_stream(file2)\n\n # Process the file.\n process(stream1=stream1, stream2=stream2, delimiter=delimiter, idx1=idx1, idx2=idx2, show=show)",
"def _operation_or(self, other):\n self._check_items(other)\n if self._active_procs is not None:\n raise DontCallWhenIterRunError('Do not call the operation in iter_run loop.')\n return ReadingSet(self._set | self._get_other_set(other))",
"def _command_operation(self, operation):\r\n if self.current_number != '0' and\\\r\n (self.current_operation is not None) and\\\r\n self.storage_number != '0':\r\n # check if user already selected a operation\r\n # if it is, do the equal function once and display\r\n # the resolve\r\n self._command_equal()\r\n self.current_operation = operation\r\n else:\r\n # else storage the operation\r\n self.storage_number = self.current_number\r\n self.current_operation = operation\r\n self.current_number = '0'",
"def _command_equal(self):\r\n if self.current_operation is None:\r\n # check if user selected a operation\r\n return\r\n elif self.current_operation == self.list[4]:\r\n # check operation if is ÷\r\n self.current_number = str(\r\n float(self.storage_number) /\r\n float(self.current_number))\r\n elif self.current_operation == self.list[9]:\r\n # check operation if is ×\r\n self.current_number = str(\r\n float(self.storage_number) *\r\n float(self.current_number))\r\n elif self.current_operation == self.list[14]:\r\n # check operation if is -\r\n self.current_number = str(\r\n float(self.storage_number) -\r\n float(self.current_number))\r\n elif self.current_operation == self.list[19]:\r\n # check operation if is +\r\n self.current_number = str(\r\n float(self.storage_number) +\r\n float(self.current_number))\r\n\r\n self.current_operation = None\r\n # reset the storage of operation\r",
"def cmpfile(file_left, file_right):\n nobv.visual_comparefile(file_left, file_right)",
"def _binaryop(self, other, op: str):\n raise NotImplementedError",
"def calc(a,b,operation):\n\n if operation == 'product':\n return a * b\n else:\n return a + b",
"def do_F1_F2_operation (df, cols, operation):\n df_f1 = df[create_F1_F2_cols(cols, output='F1')]\n df_f2 = df[create_F1_F2_cols(cols, output='F2')]\n\n if operation =='subtract':\n df_f1 = change_col_prefix(df_f1, 'F1_', 'F1mF2_')\n df_f1 = change_col_prefix(df_f1, 'FM_F1_', 'FM_F1mF2_')\n df_f2 = change_col_prefix(df_f2, 'F2_', 'F1mF2_')\n df_f2 = change_col_prefix(df_f2, 'FM_F2_', 'FM_F1mF2_')\n df_diff = df_f1 - df_f2\n elif operation == 'over':\n df_f1 = change_col_prefix(df_f1, 'F1_', 'F1oF2_')\n df_f1 = change_col_prefix(df_f1, 'FM_F1_', 'FM_F1oF2_')\n df_f2 = change_col_prefix(df_f2, 'F2_', 'F1oF2_')\n df_f2 = change_col_prefix(df_f2, 'FM_F2_', 'FM_F1oF2_')\n df_diff = df_f1/df_f2\n elif operation == 'pct_F1':\n df_f1 = change_col_prefix(df_f1, 'F1_', 'pct_F1_')\n df_f1 = change_col_prefix(df_f1, 'FM_F1_', 'pct_FM_F1_')\n df_f2 = change_col_prefix(df_f2, 'F2_', 'pct_F1_')\n df_f2 = change_col_prefix(df_f2, 'FM_F2_', 'pct_FM_F1_')\n df_diff = (df_f2 - df_f1)/df_f1\n\n return df_diff",
"def _operation_sub(self, first, second): \n self._check_items(first)\n self._check_items(second)\n if self._active_procs is not None:\n raise DontCallInIterRunError('Do not call the operation in iter_run loop.')\n return ReadingSet(self._get_other_set(first) - self._get_other_set(second))",
"def ie(self,cmd):\n if len(self.handle)==1:\n f0=self.handle[0]\n if cmd==\"in\":\n title=\"Function\\t\\t\\tInclusive\"\n lst=sorted(self.incl[f0].items(), key=lambda (k,v):v,reverse=True)\n if cmd==\"ex\":\n title=\"Function\\t\\t\\tExclusive\"\n lst=sorted(self.excl[f0].items(), key=lambda (k,v):v,reverse=True)\n else: \n f0=self.handle[0]\n f1=self.handle[1]\n if not self.incldiff:\n self.getdiff(self.incl[f0],self.total[f0],\n self.incl[f1],self.total[f1],\n self.incldiff)\n self.getdiff(self.excl[f0],self.total[f0],\n self.excl[f1],self.total[f1],\n self.excldiff)\n if cmd==\"in\":\n print(\"--Inclusive--\")\n title='Function\\t\\t\\t%s\\t%s\\tDiff' % (self.fname[0],self.fname[1])\n lst=sorted(self.incldiff.items(), key=lambda (k,v):v,reverse=True)\n if cmd==\"ex\":\n print(\"--Exclusive--\")\n title='Function\\t\\t\\t%s\\t%s\\tDiff' % (self.fname[0],self.fname[1])\n lst=sorted(self.excldiff.items(), key=lambda (k,v):v,reverse=True)\n if cmd==\"ina\": \n title=\"Function\\t\\t\\tInclusive\\tExclusive\"\n lst=sorted(self.incl[f0].items(), key=lambda (k,v):v,reverse=True)\n if cmd==\"exa\": \n title=\"Function\\t\\t\\tInclusive\\tExclusive\"\n lst=sorted(self.excl[f0].items(), key=lambda (k,v):v,reverse=True)\n print(title)\n for i in range(min(len(lst),self.lines)):\n (k,v)=lst[i][0],lst[i][1]\n if cmd==\"in\" or cmd==\"ex\":\n if len(self.handle)>1:\n if cmd==\"in\":\n pct1=self.pct_helper(k,self.incl[f0],self.total[f0])\n pct2=self.pct_helper(k,self.incl[f1],self.total[f1])\n else:\n pct1=self.pct_helper(k,self.excl[f0],self.total[f0])\n pct2=self.pct_helper(k,self.excl[f1],self.total[f1])\n row='%-30s%6.2f%%\\t\\t%6.2f%%\\t\\t%6.2f%%' % (k,pct1,pct2,v)\n else:\n row='%-30s %6.2f%%(%d)' % (k,100.0*v/self.total[f0],v)\n elif cmd==\"ina\":\n exv=self.excl[f0][k] if k in self.excl[f0] else 0\n row='%-30s %6.2f%%(%d)\\t%6.2f%%(%d)' % (k,\n 100.0*v/self.total[f0],v,\n 100.0*exv/self.total[f0], exv)\n else:\n inv=self.incl[f0][k] if k in self.incl[f0] else 0\n row='%-30s %6.2f%%(%d)\\t%6.2f%%(%d)' % (k,\n 100.0*inv/self.total[f0], inv,\n 100.0*v/self.total[f0], v)\n print(row)",
"def __cmp__(self, other):\n # note compare order deliberatly compares other first, because we want the opposite\n # of what normally be returned by the these tuples\n if isinstance(other, Operation):\n return cmp((other.is_ready, other.queue_priority, other.seq),(self.is_ready, self.queue_priority,self.seq))\n else:\n raise TypeError('Operations can only be compared to other Operation')",
"def algdelta(alg1, alg2, *args):\n file_delta = ord(alg2[0]) - ord(alg1[0])\n rank_delta = ord(alg2[1]) - ord(alg1[1])\n return file_delta, rank_delta",
"def perform_operation(operator, num_1, num_2):\n\n if operator == \"*\":\n return num_1 * num_2\n if operator == \"+\":\n return num_1 + num_2\n if operator == \"-\":\n return num_1 - num_2\n if operator == \"/\":\n return num_1 / num_2",
"def fileCompare(a, b):\n if a[\"file_run\"] > b[\"file_run\"]:\n return 1\n elif a[\"file_run\"] == b[\"file_run\"]:\n if a[\"file_lumi\"] > b[\"file_lumi\"]:\n return 1\n elif a[\"file_lumi\"] == b[\"file_lumi\"]:\n if a[\"file_first_event\"] > b[\"file_first_event\"]:\n return 1\n if a[\"file_first_event\"] == b[\"file_first_event\"]:\n return 0\n\n return -1",
"def calc(operand_1, operand_2):\n return operand_1 - operand_2",
"def calc(operand_1, operand_2):\n return operand_1 - operand_2",
"def calc(operand_a, operand_b):\n return operand_a - operand_b",
"def merge(self, op):\n self.__desc = listify(self.__desc, op.__desc)\n self.__name = listify(self.__name, op.__name)\n self.__label_pre = listify(self.__label_pre, op.__label_pre)\n self.__label_post = listify(self.__label_post, op.__label_post)",
"def solving(num1, num2, opt):\n res = 0\n if opt == '*':\n res = num1 * num2\n elif opt == '/':\n try:\n res = num1 / num2\n except ZeroDivisionError:\n res = 0\n elif opt == '+':\n res = num1 + num2\n elif opt == '-':\n res = num1 - num2\n return abs(res)",
"def diff_files_callback(self, option, opt_str, value, parser):\n assert value is None\n diff_files_args = []\n while parser.rargs:\n # Stop if we find a short- or long-form arg, or a '--'\n # Note that this doesn't handle negative numbers.\n arg = parser.rargs[0]\n if arg[:2] == \"--\" or (arg[:1] == \"-\" and len(arg) > 1):\n break\n else:\n diff_files_args.append(arg)\n del parser.rargs[0]\n\n if len(diff_files_args) not in (1, 2, 3):\n raise optparse.OptionValueError(\n _(\"wrong number of arguments supplied to --diff\"))\n parser.values.diff.append(diff_files_args)",
"def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2",
"def fun(op, v1, v2):\n if op == '+':\n return v1+v2\n elif op == '-':\n return v1-v2\n elif op == '*':\n return v1*v2\n elif op == '/':\n return v1",
"def operationMenu(cls) -> int:\n print(\"Text File Operations -->\")\n print(\"1. Remove special characters.\")\n print(\"2. Remove all single characters.\")\n print(\"3. Remove multiple spaces.\")\n print(\"4. Convert the text into lower case.\")\n print(\"5. Expand the contractions in the text.\")\n op = int(input(\"Enter option: \"))\n return op",
"def main():\n\n parser = argparse.ArgumentParser(\n description=\"Compare the metadata content of two files\"\n )\n\n parser.add_argument(\n \"files\",\n nargs=2,\n metavar=\"FILE\",\n help=\"The names of two files to compare\",\n )\n\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"print detailed output on screen\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--ordered\",\n action=\"store_true\",\n help=\"When comparing lists, check the element order too.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--drop\",\n nargs=\"*\",\n default=None,\n metavar=\"KEY\",\n help=\"Keys to drop from metadata retrieved from file\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--mode\",\n default=\"lite\",\n metavar=\"MODE\",\n type=str,\n choices=[\"tiny\", \"lite\", \"full\", \"peeker\"],\n help=\"\"\"\\\n This flag provides the user capability to select the amount of\n metadata retrieved. There three options:\n tiny (only those values used in PyJobTransforms),\n lite (same output as dump-athfile)\n and full ( all available data found)\n \"\"\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--type\",\n default=None,\n metavar=\"TYPE\",\n type=str,\n choices=[\"POOL\", \"BS\"],\n help=\"\"\"\\\n The file type of the input filename. By default, it tries to\n determine itself the file type of the input.\n \"\"\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--filter\",\n default=[],\n metavar=\"FILTER\",\n nargs=\"+\",\n type=str,\n help=\"Expression to select specific metadata fields to retrieve.\",\n )\n\n parser.add_argument(\n \"-x\",\n \"--diff-format\",\n default=\"simple\",\n type=str,\n choices=[\"simple\", \"diff\"],\n help=\"Switch between 'simple' or 'diff' style differences \",\n )\n\n parser.add_argument(\n \"--promote\",\n default=None,\n type=bool,\n help=\"Force promotion or not of the metadata keys \",\n )\n\n args = parser.parse_args()\n\n try:\n diff = meta_diff(\n args.files,\n verbose=args.verbose,\n ordered=args.ordered,\n drop=args.drop,\n mode=args.mode,\n meta_key_filter=args.filter,\n file_type=args.type,\n promote=args.promote,\n diff_format=args.diff_format,\n )\n except (ValueError, IndexError):\n print(\"you must supply two files to compare\")\n sys.exit(1)\n except ReferenceError:\n print(\"no such file\")\n sys.exit(1)\n\n if diff:\n print(\"\\n\".join(diff))\n sys.exit(1)\n\n sys.exit(0)",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def compare_files(input_index_file, output_index_file ):\n \n # -------------\n # open the input index file for reading\n # -------------\n input_set = open_read_file(input_index_file)\n\n # -------------\n # open the output index file for reading\n # -------------\n output_set = open_read_file(output_index_file)\n\n # -------------\n # get the difference in the files where\n # the input_set is the larger set\n # -------------\n unproc_files = set_difference(output_set, input_set)\n #print unproc_files\n\n return unproc_files",
"def fileCmp (working, ref, compare_content=0, verbose=0):\n\tif verbose and working and ref:\n\t\tprint \"fileCmp\\n\\t working: %s\\n\\tref: %s\" % (\n\t\t\tworking.path or \"no working path\", \n\t\t\tref.path or \"no reference path\")\n\t\t\n\tflag = \"UNASSIGNED\"\n\tdebugging = 0\n\t\n\tif ref and not working:\n\t\tflag = \"missing\"\n\t\n\telif not ref: # or not os.path.exists(ref.path):\n\t\tflag = \"new\"\n\t\t\n\telif isinstance (working, JloFile):\n\t\t# print \"ref: %s\" % ref.__class__.__name__\n\t\tif debugging:\n\t\t\tif not working.equals (ref):\n\t\t\t\tprint \"working file is different\"\n\t\t\t\t\n\t\t\tif not working.newerthan (ref):\n\t\t\t\tprint \"working file has same date as ref\"\n\t\t\n\t\t\tif working.modtime == ref.modtime:\n\t\t\t\tprint \"mods dates match\"\n\t\t\telse:\n\t\t\t\t# print \"wrk: %d ref: %d\" % (working.modtime,ref.modtime)\n\t\t\t\tprint \"wrk: %s ref: %s\" % \\\n\t\t\t\t\t(working.ppDate (working.modtime),\n\t\t\t\t\t working.ppDate (ref.modtime))\n\t\t\n\t\tif compare_content:\n\t\t\tif working.equals (ref):\n\t\t\t\tflag = \"\"\n\t\t\telse:\n\t\t\t\tflag = \"modified\"\n\t\t\t\t\n\t\telse:\n\t\t\tflag = \"\"\n\n\t\t\t\n\t\t\t\n\t\t\t# elif not working.newerthan (ref):\n\t\t\t# flag = \"obsolete-check\"\n\t\t# elif working.newerthan (ref) and not working.equals (ref):\n\t\t\t# flag = \"modified\"\n\t\t# elif not working.equals (ref):\n\t\t\t# print \"not modified\"\n\t\t\t# flag = \"different\"\n\t\t# elif working.newerthan (ref):\n\t\t\t# flag = \"modified\"\n\tif verbose and working:\n\t\tprint \"%s --> %s\" % (working.name, flag)\n\treturn flag",
"def main():\n args = utils.parse_args()\n strategy = optimize.optimal_strategy(args.file_1, args.file_2,\n args.mem_limit)\n\n start = time.time()\n print('Beginning operation')\n res = strategy.intersect(args.file_1, args.file_2, args.mem_limit)\n end = time.time()\n print(res.cardinality)\n print(f'Operation completed in {end - start} seconds')",
"def test_two_files():\n\n out_file = ''.join(\n random.choices(string.ascii_uppercase + string.digits, k=5))\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {tair} {amigo} -o {out_file}')\n assert rv == 0\n assert re.search('1: tair_heat.txt', out)\n assert re.search('2: amigo_heat.txt', out)\n assert re.search(\n f'Wrote 20 gene IDs from 2 files to file \"{out_file}\"', out)\n assert os.path.isfile(out_file)\n exp_two = '\\n'.join(\n sorted(\"\"\"\n AT5G12020 AT3G06400 AT2G33590 AT1G54050 AT5G67030 AT4G14690 AT1G16030 AT5G03720 AT3G10800 \n AT5G12140 AT1G64280 AT3G24500 AT3G09440 AT3G04120 AT4G19630 AT1G16540 AT2G22360 AT1G13930 \n AT5G41340 AT3G24520\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_two.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)"
]
| [
"0.5435761",
"0.5236189",
"0.5221077",
"0.5185513",
"0.51824975",
"0.51474214",
"0.5124852",
"0.51119435",
"0.5088273",
"0.49563205",
"0.49543867",
"0.4934598",
"0.49257588",
"0.49221843",
"0.49147666",
"0.49147666",
"0.49069288",
"0.48821086",
"0.48575395",
"0.4838069",
"0.4826706",
"0.48253638",
"0.48200232",
"0.47900397",
"0.47825682",
"0.47825682",
"0.4769813",
"0.47555766",
"0.47543904",
"0.47442475"
]
| 0.6201015 | 0 |
run application using gunicorn http server | def run_gunicorn_server(app):
from gunicorn.app.base import Application
class FlaskApplication(Application):
def init(self, parser, opts, args):
return {
'bind': '{0}:{1}'.format(FLASK_HOST, FLASK_PORT),
'workers': 4
}
def load(self):
return app
FlaskApplication().run() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gunicorn():\n # fmt: off\n if os.name == \"nt\":\n print(\"Sorry, gunicorn is not available on windows\")\n exit(1)\n specter_gunicorn = SpecterGunicornApp(config=None)\n specter_gunicorn.run()",
"def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )",
"def start_gunicorn():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('python manage.py run_gunicorn -w 4 -b 127.0.0.1:8000 --daemon')",
"def main():\r\n run_wsgi_app(app)",
"def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)",
"def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])",
"def run_app():\n uvicorn.run(\n app,\n host='0.0.0.0',\n port=8000,\n log_config=log_configs.get_uvicorn_logger(configs.LOG_PATH)\n )",
"def start(**kwargs):\n # Project\n\n CustomWSGI(\n app=\"stats.api.main:api\",\n options={\n \"worker_class\": \"uvicorn.workers.UvicornWorker\",\n \"preload\": True,\n \"keepalive\": 10,\n \"command\": shutil.which(\"gunicorn\"),\n \"bind\": \":\".join(\n (format_listen_address(params.listen_address), str(params.listen_port))\n ),\n \"workers\": workers,\n \"loglevel\": loglevel,\n \"accesslog\": \"-\",\n \"errorlog\": \"-\",\n # \"logconfig_dict\": {\"formatters\": {\"generic\": {\"format\": \"%(message)s\"}}},\n **kwargs,\n },\n ).run()",
"def main(config=None):\n init = InitRepoPath(config)\n\n listen_address, port = init.get_listen_address()\n\n backend = DictBackend(init.get_backends())\n \n app = make_wsgi_chain(backend)\n server = make_server(listen_address, port, app,\n handler_class=WSGIRequestHandlerLogger,\n server_class=WSGIServerLogger)\n logger.info('Listening for HTTP connections on %s:%d',\n listen_address, port)\n server.serve_forever()",
"def handle(self, app, host, port, workers):\n bind = \"%s:%s\" % (host, str(port))\n\n workers = WORKERS\n pid_file = PIDFILE\n loglevel = LOGLEVEL\n\n # We have to be sure that the directory exist in order\n # to write there the pid file.\n mkpath(os.path.dirname(pid_file))\n\n if version_info < (0, 9, 0):\n raise RuntimeError(\"Unsupported gunicorn version! Required > 0.9.0\")\n else:\n class FlaskApplication(Application):\n def init(self, parser, opts, args):\n return {\n 'bind': bind,\n 'workers': workers,\n 'pidfile': pid_file,\n 'loglevel': loglevel,\n }\n\n def load(self):\n return app\n\n # Do not pass any cmdline options to gunicorn\n sys.argv = sys.argv[:2]\n\n logger_api.info(\"Logging to stderr with loglevel '%s'\" % loglevel)\n logger_api.info(\"Starting gunicorn...\")\n\n FlaskApplication().run()",
"def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)",
"def run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:\n GunicornApplication(app, options=options).run()",
"def main():\n run_wsgi_app(APP)",
"def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass",
"def run_gunicorn(host, port, workers, timeout):\n from gunicorn.app.base import Application\n\n class FlaskApplication(Application):\n def init(self, parser, opts, args):\n return {\n 'bind': '{0}:{1}'.format(host, port),\n 'workers': workers, 'timeout': timeout\n }\n\n def load(self):\n return manager.app\n\n application = FlaskApplication()\n return application.run()",
"def pro_start():\r\n nginx_reload()\r\n gunicorn_start()",
"def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0",
"def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())",
"def web(host: str, port: str, loglevel: str) -> None:\n uvicorn.run(\"source.apps.web:App\", host=host, port=port, log_level=loglevel)",
"def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)",
"def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)",
"def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())",
"def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)",
"def main():\n tornado.options.parse_command_line()\n ioloop = tornado.ioloop.IOLoop.instance()\n http_server = tornado.httpserver.HTTPServer(App())\n http_server.listen(options.port)\n tornado.autoreload.start()\n ioloop.start()",
"def local_main():\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)",
"def start():\n _with_deploy_env(['./bin/paster serve src/remix/oerpub/rhaptoslabs/production.ini --daemon'])",
"def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))",
"def run(self):\n self.app.run(host=\"0.0.0.0\")",
"def start(self):\n self.serve_forever()",
"def start(self):\n self.serve_forever()"
]
| [
"0.748902",
"0.72147924",
"0.70594573",
"0.7044088",
"0.6984339",
"0.6954709",
"0.6949557",
"0.693541",
"0.6902215",
"0.6897665",
"0.6895318",
"0.68715054",
"0.6867967",
"0.6860533",
"0.6821104",
"0.67707294",
"0.67282206",
"0.67218506",
"0.669681",
"0.6695175",
"0.66906226",
"0.6630844",
"0.66283286",
"0.66036636",
"0.65620977",
"0.65265673",
"0.65060043",
"0.649526",
"0.64894027",
"0.64894027"
]
| 0.7617813 | 0 |
Execute raw cypher queries | def cypher(self, query: str, **parameters: str) -> Any:
try:
# results, meta = db.cypher_query(query, parameters)
results, _ = db.cypher_query(query, parameters)
except CypherSyntaxError as e:
log.warning(query)
log.error(f"Failed to execute Cypher Query\n{e}")
raise CypherSyntaxError("Failed to execute Cypher Query") from e
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cypher(self, query):\n from neomodel import db\n try:\n results, meta = db.cypher_query(query)\n except Exception as e:\n raise Exception(\n \"Failed to execute Cypher Query: %s\\n%s\" % (query, str(e)))\n return False\n # log.debug(\"Graph query.\\nResults: %s\\nMeta: %s\" % (results, meta))\n return results",
"def run_cypher_query(self, query):\n with self._driver.session() as session:\n session.write_transaction(self.add_input_graph, query)",
"def run_query(self, query: str) -> BoltStatementResult:\n with self.neo4j_driver.driver.session() as session:\n return session.run(query)",
"def cypher(self):\n kwargs = {'match': '',\n 'optional_match': '',\n 'where': '',\n 'with': '',\n 'return': ''}\n\n # generate initial match strings\n\n match_strings = set()\n withs = set()\n nodes = self.required_nodes()\n for node in nodes:\n if node.has_subquery:\n continue\n match_strings.add(node.for_match())\n withs.update(node.withs)\n\n kwargs['match'] = 'MATCH ' + ',\\n'.join(match_strings)\n\n # generate main filters\n\n properties = []\n for c in self._criterion:\n if c.in_subquery:\n continue\n properties.append(c.for_cypher())\n if properties:\n kwargs['where'] += 'WHERE ' + '\\nAND '.join(properties)\n\n optional_nodes = self.optional_nodes()\n optional_match_strings = []\n for node in optional_nodes:\n if node.has_subquery:\n continue\n optional_match_strings.append(node.for_match())\n withs.update(node.withs)\n if optional_match_strings:\n s = ''\n for i, o in enumerate(optional_match_strings):\n s += 'OPTIONAL MATCH ' + o + '\\n'\n kwargs['optional_match'] = s\n\n # generate subqueries\n\n with_statements = ['WITH ' + ', '.join(withs)]\n\n for node in nodes:\n if not node.has_subquery:\n continue\n statement = node.subquery(withs, self._criterion)\n with_statements.append(statement)\n\n withs.update(node.withs)\n\n for node in optional_nodes:\n if not node.has_subquery:\n continue\n statement = node.subquery(withs, self._criterion, optional=True)\n with_statements.append(statement)\n\n withs.update(node.withs)\n kwargs['with'] = '\\n'.join(with_statements)\n\n kwargs['return'] = self.generate_return()\n cypher = self.query_template.format(**kwargs)\n\n return cypher",
"def execute(self, *args, **kwargs):\n return self.engine.execute(*args, **kwargs)",
"def execute(self, *args, **kwargs):\n return self.engine.execute(*args, **kwargs)",
"def execute(self, statement):\n return self._engine.connect().execute(statement)",
"def _run_query(self):",
"def pg_execute(pg_conn, sql):\n print sql\n # XXX execute command",
"def execute_query(self, *args, **kwargs):",
"def execute(self, dbname, query, username=None, commit=False):\n with self.connect(dbname, username) as node_con:\n res = node_con.execute(query)\n if commit:\n node_con.commit()\n return res",
"def execute(self, *sql):\n # assemble the command and pass it on to the connection\n return self.postgres.execute(self.connection, \"\\n\".join(sql))",
"def _execute(self, stmt) -> sa.engine.ResultProxy:\n return self._engine.execute(stmt)",
"def execute(query):\n print query\n cursor.execute(query)",
"def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise",
"def execute_statement(self, statement):\n context = self.__context\n session = context.session()\n with session as connection:\n query_result = connection.execute(statement)\n\n return query_result",
"def execute_query(query, params={}, transaction=True, context=\"\"):\r\n if transaction:\r\n query = \"g.stopTransaction(FAILURE)\\n\" + query\r\n\r\n # If we have no hosts available raise an exception\r\n if len(_hosts) <= 0:\r\n raise ThunderdomeConnectionError('Attempt to execute query before calling thunderdome.connection.setup')\r\n \r\n host = _hosts[0]\r\n #url = 'http://{}/graphs/{}/tp/gremlin'.format(host.name, _graph_name)\r\n data = json.dumps({'script':query, 'params': params})\r\n headers = {'Content-Type':'application/json', 'Accept':'application/json', 'Accept-Charset':'utf-8'}\r\n import time\r\n try:\r\n start_time = time.time()\r\n conn = httplib.HTTPConnection(host.name, host.port)\r\n conn.request(\"POST\", '/graphs/{}/tp/gremlin'.format(_graph_name), data, headers)\r\n response = conn.getresponse()\r\n content = response.read()\r\n\r\n total_time = int((time.time() - start_time) * 1000)\r\n\r\n if context and _statsd:\r\n _statsd.timing(\"{}.timer\".format(context), total_time)\r\n _statsd.incr(\"{}.counter\".format(context))\r\n\r\n\r\n except socket.error as sock_err:\r\n if _statsd:\r\n total_time = int((time.time() - start_time) * 1000)\r\n _statsd.incr(\"thunderdome.socket_error\".format(context), total_time)\r\n raise ThunderdomeQueryError('Socket error during query - {}'.format(sock_err))\r\n except:\r\n raise\r\n \r\n logger.info(json.dumps(data))\r\n logger.info(content)\r\n\r\n try:\r\n response_data = json.loads(content)\r\n except ValueError as ve:\r\n raise ThunderdomeQueryError('Loading Rexster results failed: \"{}\"'.format(ve))\r\n \r\n if response.status != 200:\r\n if 'message' in response_data and len(response_data['message']) > 0:\r\n graph_missing_re = r\"Graph \\[(.*)\\] could not be found\"\r\n if re.search(graph_missing_re, response_data['message']):\r\n raise ThunderdomeGraphMissingError(response_data['message'])\r\n else:\r\n raise ThunderdomeQueryError(\r\n response_data['message'],\r\n response_data\r\n )\r\n else:\r\n if _statsd:\r\n _statsd.incr(\"{}.error\".format(context))\r\n raise ThunderdomeQueryError(\r\n response_data['error'],\r\n response_data\r\n )\r\n\r\n return response_data['results']",
"def cypher_transaction():\n session = cypher.Session(HOST)\n return session.create_transaction()",
"def run(self, statement):\n\n # Remove spaces and EOL\n statement = statement.strip()\n if not statement: # Empty string\n yield (None, None, None, None)\n\n # Split the sql into separate queries and run each one.\n # Unless it's saving a favorite query, in which case we\n # want to save them all together.\n if statement.startswith('\\\\fs'):\n components = [statement]\n\n else:\n components = sqlparse.split(statement)\n\n for sql in components:\n # Remove spaces, eol and semi-colons.\n sql = sql.rstrip(';')\n\n # \\G is treated specially since we have to set the expanded output.\n if sql.endswith('\\\\G'):\n special.set_expanded_output(True)\n sql = sql[:-2].strip()\n try: # Special command\n _logger.debug('Trying a dbspecial command. sql: %r', sql)\n cur = self.conn.cursor()\n for result in special.execute(cur, sql):\n yield result\n except special.CommandNotFound: # Regular SQL\n yield self.execute_normal_sql(sql)",
"def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:\n resp = client.read_opencypher(query)\n df = pd.DataFrame.from_dict(resp)\n return df",
"def test_execute(self):\n rset = self.connection.execute(self.rql, export_type=\"json\")\n self.assertTrue(len(rset) > 0)",
"def execute(self, query, pars=False):\n raise NotImplementedError",
"def execute_query(driver,query):\n with driver.session() as session:\n results = session.run(query)\n return results",
"def do(self, executor):\n sql, kw = self._assemble()\n return executor.execute(\n sql, kw\n )",
"def _raw(self, query: Any, data: Any = None):\n assert isinstance(query, str)\n\n conn = self._get_session()\n try:\n results = conn.execute(query)\n\n entity_items = []\n for item in results:\n entity = self.model_cls.to_entity(item)\n entity.state_.mark_retrieved()\n entity_items.append(entity)\n\n result = ResultSet(\n offset=0,\n limit=len(entity_items),\n total=len(entity_items),\n items=entity_items,\n )\n except DatabaseError as exc:\n logger.error(f\"Error while running raw query: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return result",
"def execute(self) -> Dict:\n return self.connection.execute({\"process_graph\": self.graph})",
"def execute(self) -> Dict:\n return self.session.execute({\"process_graph\": self.graph})",
"def execute(self, context):\n logging.info(f\"Running SQL :{self.sql}\")\n self.hook = TrinoHook()\n query = self.hook.run(self.sql, autocommit=self.autocommit, parameters=self.parameters)\n if self.xcom_push:\n return query",
"def execute(self, sql):\n return self.db.execute(sql)",
"def run(self):\n rows = None\n if self.sql.startswith('select'):\n conn = self.table.connect()\n with conn.cursor() as curs:\n try:\n curs.execute(self.sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {self.sql}:\n {error.code}\"\"\")\n self.excep = exc\n raise exc\n else:\n rows = curs.fetchall()\n # logging.critical(f\"\"\"executed {self.sql}\"\"\")\n self.result_exec = rows"
]
| [
"0.65917706",
"0.6311631",
"0.6257389",
"0.62430876",
"0.6125018",
"0.6125018",
"0.61153865",
"0.61023426",
"0.60645294",
"0.605868",
"0.60440093",
"0.6003765",
"0.59613854",
"0.58681923",
"0.58470386",
"0.582492",
"0.58190846",
"0.5815288",
"0.58127505",
"0.58115643",
"0.57973677",
"0.5749967",
"0.57139194",
"0.5694625",
"0.566193",
"0.56577456",
"0.56233346",
"0.5620461",
"0.5615086",
"0.5552042"
]
| 0.6914791 | 0 |
Strip whitespace and remove special characters from terms. To be used in fuzzy search. | def sanitize_input(term: str) -> str:
return term.strip().replace("*", "").replace("'", "\\'").replace("~", "") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text",
"def clean_word(self, word):\n return self.filter_pattern.sub(u'', word.lower())",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text",
"def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)",
"def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)",
"def clean(word):\n word = word.lower()\n stopwords = ['of', 'and','to', 'at', 'in', '@']\n word = re.sub(r'[\\&/\\-\\(\\)\\|\\@,\\]\\[]+', ' ', word)\n for stopword in stopwords:\n pattern = r'\\b' + stopword + r'\\b'\n pattern = re.compile(pattern)\n word = re.sub(pattern, '', word)\n word = re.sub(r'\\s\\s+', ' ', word)\n return word",
"def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet",
"def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)",
"def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()",
"def clean_text(s,stem=False):\n\tret = s.lower()\n\tret = re.sub(r'[^a-z ]',' ',ret)\n\tret = re.sub(r' +',' ',ret).strip()\n\tret = re.sub(r'see more occupations related to this (activity|skill|task)','',ret)\n\tif stem:\n\t\tret = ' '.join( stemmer.stem(word) for word in ret.split(' ') )\n\treturn ret",
"def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out",
"def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text",
"def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())",
"def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()",
"def clean(tweet):\n #Separates the contractions and the punctuation\n\n\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<user>\", \"\")\n tweet = re.sub(\"[!#.,\\\"]\", \"\", tweet).replace(\"<url>\", \"\")\n tweet = correct_spell(tweet)\n return tweet.strip().lower()",
"def removeApostrophes(self, words):\n\t\treturn self.__apostropheRegex.sub('', words)",
"def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r",
"def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text",
"def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()",
"def processword(word):\n word = word.lower()\n word = word.strip('()?,!`.-:\\\"\\n \\'')\n return word",
"def clean_text(text):\n text = text.lower()\n text = text.replace('\\xa0', ' ')\n text = text.replace('fls.', 'folhas ')\n text = text.replace('fl.', 'folha ')\n text = text.replace('arts.', 'artigos ')\n text = text.replace('art.', 'artigo ')\n text = re_tree_dots.sub('...', text)\n text = re.sub(r'\\.\\.\\.', ' ', text)\n text = re_remove_brackets.sub(' ', text)\n text = re_changehyphen.sub('-', text)\n text = re_remove_html.sub(' ', text)\n text = re_transform_numbers.sub('0', text)\n text = re_transform_url.sub('URL', text)\n text = re_transform_emails.sub('EMAIL', text)\n text = re_quotes_1.sub(r'\\1\"', text)\n text = re_quotes_2.sub(r'\"\\1', text)\n text = re_quotes_3.sub('\"', text)\n text = re.sub('\"', ' ', text)\n text = re_dots.sub('.', text)\n text = re_punctuation.sub(r'\\1', text)\n text = re_hiphen.sub(' - ', text)\n text = re_punkts.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_b.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_c.sub(r'\\1 \\2', text)\n text = re_doublequotes_1.sub('\\\"', text)\n text = re_doublequotes_2.sub('\\'', text)\n text = re_trim.sub(' ', text)\n return text.strip()",
"def remove_punct(self,text):",
"def clean_text(text):\n text = str(text).lower()\n text = text.strip(string.punctuation)\n text = re.sub(\"&\", '', text)\n text = re.sub(\"https\", '', text)\n text = re.sub('\\W\\s', '', text)\n text = re.sub('\\s,\\W', '', text)\n text = re.sub('[.!@#$%^&*()_,:;/-]', '', text)\n text = re.sub(\"\\d+\", '', text)\n\n return text",
"def clean_word(word: str) -> str:\n return re.sub(\n r\"^[,.'\\\"()!]+\", \"\", re.sub(r\"[,.'\\\"()!]+$\", \"\", word.lower())\n )",
"def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence"
]
| [
"0.70704466",
"0.70424956",
"0.701857",
"0.701857",
"0.701857",
"0.701857",
"0.701857",
"0.701857",
"0.700928",
"0.6997776",
"0.69969887",
"0.6988537",
"0.698002",
"0.6951187",
"0.69446355",
"0.69331133",
"0.6931965",
"0.6915906",
"0.68655795",
"0.68475455",
"0.68451554",
"0.68219537",
"0.68135566",
"0.67825484",
"0.6728122",
"0.6700816",
"0.669739",
"0.66773087",
"0.6661505",
"0.66606027"
]
| 0.7121154 | 0 |
Returns a tuple with joint states and TCP coordinates. | def __getitem__(self, item):
# exclude tcp orientation
return self._joint_states[item], self._tcp_coords[item][:2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_joint_states(self) -> Tuple[List[float], List[float], List[str]]:\n \n rospy.wait_for_service('/' + self.model_name + '/get_all_joint_states', timeout=2.0)\n try:\n resp = self.__get_all_joint_states()\n except rospy.ServiceException as e:\n print('Service did not process request:' + str(e))\n \n joint_positions = resp.joint_states.position\n joint_velocities = resp.joint_states.velocity\n joint_order = resp.joint_states.name\n \n return joint_positions, joint_velocities, joint_order",
"def state_(state):\n return tuple( [ tuple( row ) for row in state ] )",
"def build_joint_state_msg(self):\n \n js_msg = JointState()\n js_msg.header.stamp = rospy.Time.now()\n \n if self.joint_names == []:\n self.joint_names = [\"{}.{}\".format('hand', attr) \n for attr in ORI_ATTRIBUTES] + \\\n [\"{}.{}.{}\".format(finger, bone, ori) \n for finger in FINGER_NAMES \n for bone in FINGER_BONES\n for ori in ORI_ATTRIBUTES]\n LOG.v(\"Publishing JointState for the following joints: {}\".format(self.joint_names), \"start_transmit\")\n \n js_msg.position = [0.0] * len(self.joint_names)\n\n pos = 0\n # Build JointState. First the hand... \n for i, attr in enumerate(ORI_ATTRIBUTES):\n js_msg.name.append('hand.' + str(attr))\n \n # Roll precision hack\n if attr == 'roll':\n vector = self.hand.palm_normal\n else:\n vector = self.hand.direction\n \n js_msg.position[pos] = getattr(vector, attr)\n pos += 1\n\n # ...then the fingers\n for i, finger_name, finger in \\\n [(i, finger_name, self.fingers[finger_name]) \\\n for i, finger_name in enumerate(FINGER_NAMES)]:\n \n # LEAP API v2.0: Skeletal model\n # Get bones\n for j, bone_name, bone in \\\n [(j, bone_name, finger.bone(j)) \\\n for j, bone_name in enumerate(FINGER_BONES)]:\n\n # Fill the joint values one by one\n for k, attr in enumerate(ORI_ATTRIBUTES):\n\n joint_name = \"{}.{}.{}\".format(finger_name, bone_name, attr)\n joint_value = getattr(bone.direction, attr)\n \n js_msg.name.append(joint_name)\n js_msg.position[pos] = joint_value\n pos += 1\n \n # return the JointState message\n return js_msg",
"def get_joint_states(self, joints: List[str]) -> Tuple[List[float], List[float], List[str]]:\n assert all([j in self.joints.names for j in joints]), 'All joints requested must be in self.joints'\n \n rospy.wait_for_service('/' + self.model_name + '/get_joint_states', timeout=2.0)\n try:\n resp = self.__get_joint_states(joint_names=joints)\n except rospy.ServiceException as e:\n print('Service did not process request:' + str(e))\n \n joint_positions = resp.joint_states.position\n joint_velocities = resp.joint_states.velocity\n joint_order = resp.joint_states.name\n \n return joint_positions, joint_velocities, joint_order",
"def get_tuple(self):\n return (self.r, self.g, self.b)",
"def get_tuple(self):\n return (self.r, self.g, self.b)",
"def __getstate__(self) -> Tuple[object, ...]:\n basic: Tuple[object, ...] = (\n intern(self.output),\n intern(self.target),\n intern(self.input),\n self.comma_sep,\n )\n # Instance, delays and times are more rare - if unset don't include.\n if self.inst_in or self.inst_out or self.params or self.delay or self.times != -1:\n return (\n *basic,\n intern(self.inst_out) if self.inst_out is not None else None,\n intern(self.inst_in) if self.inst_in is not None else None,\n intern(self.params),\n self.delay,\n self.times,\n )\n else:\n return basic",
"def get_state(self):\n\n return self.t, self.x",
"def sub_callback(joint_state_msg):\n global joint_values\n print joint_values\n #TODO\n joint_values = joint_state_msg.position",
"def current_state(self):\n return self.matrix\n #return tuple([tuple(i) for i in self.matrix]) # + [(self.rows, self.cols)])",
"def result(self, state, action):\n\n # blank is the index of the blank square\n blank = self.find_blank_square(state)\n new_state = list(state)\n\n delta = {'UP': -3, 'DOWN': 3, 'LEFT': -1, 'RIGHT': 1}\n neighbor = blank + delta[action]\n new_state[blank], new_state[neighbor] = new_state[neighbor], new_state[blank]\n\n return tuple(new_state)",
"def nodes(self) -> tuple[Node, Node]:\n self.lab.sync_topology_if_outdated()\n return self.node_a, self.node_b",
"def result(self, state, action):\n \n worker = state[0]\n boxes = state[1]\n move = action[1]\n coord = action[0] \n newBoxes = []\n \n worker = coord\n \n for box in boxes:\n if box == coord:\n newBox = move_coords(box, move)\n newBoxes.append(newBox)\n else:\n newBoxes.append(box)\n \n newState = ((worker), tuple(newBoxes))\n return newState",
"def startingState(self):\n # Returns starting position and 4 false because no corners are visited yet\n return (self.startingPosition, (False, False, False, False))",
"def coordinate(self):\n\t\tif self.boldness_coord is None and self.price_coord is None and self.hold_coord is None:\n\t\t\treturn None\n\n\t\treturn (self.boldness_coord, self.price_coord, self.hold_coord)",
"def represent_state(state):\n return tuple(state[0]), tuple(state[1]), tuple(state[2])",
"def fullState(self):\n return (self.state, self._textMsg, self._hubMsg)",
"def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))",
"def get_request(self):\n (data, self.socket), client_addr = socketserver.UDPServer.get_request(self)\n logger.info(\"Server connected to by:{}\".format(client_addr))\n return (data, self.socket), client_addr",
"def get_latest_states(self) -> Tuple:\n # Ensure index is in an allowable range\n assert self.step <= self.num_steps\n\n o_t = self.obs_buffer[self.step].unsqueeze(0)\n h_tminus1 = self.hid_buffer[self.step].unsqueeze(0)\n m_t = self.don_buffer[self.step].unsqueeze(0)\n\n return o_t, h_tminus1, m_t",
"def getstate(self):\n out = []\n for row in self.a:\n out.append([])\n for item in row:\n out[-1].append(itemstate(item))\n return (\"matrix\", out, self.y, self.x, self.converter, self.onlydiag())",
"def result(self, state, action):\n \n assert action in self.actions(state)\n worker = state[0]\n boxes = state[1]\n newBoxes = []\n \n worker = move_coords(worker, action)\n \n for box in boxes:\n if worker == box:\n newBox = move_coords(box, action)\n newBoxes.append(newBox)\n else:\n newBoxes.append(box)\n \n newState = ((worker), tuple(newBoxes))\n return newState",
"def address(self) -> tuple[str, int]:",
"def tuple(self) -> Tuple[float, float]:\n return (self.latitude, self.longitude)",
"def get_trajectory(self) -> Tuple:\n # Ensure index is in an allowable range\n assert self.step <= self.num_steps\n # TODO NOTE consider edge case: what happens if this is called right\n # after self.cycle() has been called? does it still work?\n\n # ==\n # Get the trajectory up to current timestep\n o_traj = (self.obs_buffer[:self.step]\n .view(-1, *self.observation_shape)) # (T, *obs_shape)\n h_init = (self.hid_buffer[0]\n .view(-1, self.hidden_state_dim)) # (1, hidden_dim)\n d_traj = (self.don_buffer[:self.step]\n .view(-1, 1)) # (T, 1)\n a_traj = (self.act_buffer[:self.step]\n .view(-1, self.action_dim)) # (T, action_dim)\n\n # Return\n return o_traj, h_init, d_traj, a_traj",
"def get_tup(self):\n\n tup = (\n self.key,\n self.headers,\n self.proxies,\n self.link,\n self.delta,\n self.size,\n self.status,\n datetime.now(),\n )\n return tup",
"def successors(state):\n free_coordinates = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == '_':\n free_coordinates.append([i, j])\n\n return free_coordinates",
"def get(self):\n return (self.x,self.y);",
"def final_states(self) -> Tuple[tuple, ...]:\n return self._final_states",
"def get_observation_neighbour(self):\n state = {}\n for grid_id, grid in self.grids.items():\n drivers = list(grid.get_idle_drivers().values())\n orders = self.get_active_order_neighbour(grid)\n neighbour_drivers = self.get_active_driver_neighbour(grid)\n for driver in drivers:\n driver_id = driver.get_driver_id()\n loc = driver.get_position()\n time = driver.city_time\n neighbour_drivers.remove(driver) # pop itself\n assert driver_id not in state\n state[driver_id] = [(loc, time), orders, neighbour_drivers]\n neighbour_drivers.append(driver) # insert back\n return state"
]
| [
"0.6140449",
"0.59999305",
"0.5931853",
"0.5850808",
"0.58111596",
"0.58111596",
"0.5742225",
"0.5667046",
"0.56276125",
"0.5614639",
"0.5608495",
"0.54987055",
"0.549173",
"0.54879147",
"0.54866815",
"0.54844534",
"0.5476058",
"0.54419196",
"0.5407677",
"0.5401779",
"0.5390978",
"0.53645414",
"0.5354915",
"0.53172153",
"0.5315032",
"0.52924037",
"0.52893317",
"0.5274816",
"0.52523303",
"0.52520967"
]
| 0.6355352 | 0 |
Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. It tests whether the distribution of the differences x - y is symmetric about zero. | def wilcoxon_test(data):
n = len(data)
absolute_values = []
for d in data:
absolute_values.append((d, np.abs(d)))
absolute_values.sort(key=lambda x: x[1])
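    # sort by absolute difference so that index i + 1 below serves as the rank (ties are not averaged)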
ret = []
for i, d in enumerate(absolute_values):
ret.append((i + 1, d[0], d[1]))
t_plus = 0
t_minus = 0
for tup in ret:
if tup[1] < 0:
t_minus += tup[0]
else:
t_plus += tup[0]
w = min(t_plus, t_minus)
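    # normal approximation under H0: E[W] = n(n+1)/4, SD = sqrt(n(n+1)(2n+1)/24)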
E_w = n * (n + 1) / 4
se = np.sqrt(n * (n+1) * (2*n+1)/24)
z = (w - E_w) / se
p_value = 2. * norm.sf(abs(z)) # two sided test
return z, p_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _wilcoxon(_sample_a, _sample_b):\n res = stats.ranksums(_sample_a, _sample_b)\n print('Wilcoxon rank-sum\\nstatistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def lwilcoxont(x,y):\r\n if len(x) <> len(y):\r\n raise ValueError, 'Unequal N in wilcoxont. Aborting.'\r\n d=[]\r\n for i in range(len(x)):\r\n diff = x[i] - y[i]\r\n if diff <> 0:\r\n d.append(diff)\r\n count = len(d)\r\n absd = map(abs,d)\r\n absranked = rankdata(absd)\r\n r_plus = 0.0\r\n r_minus = 0.0\r\n for i in range(len(absd)):\r\n if d[i] < 0:\r\n r_minus = r_minus + absranked[i]\r\n else:\r\n r_plus = r_plus + absranked[i]\r\n wt = min(r_plus, r_minus)\r\n mn = count * (count+1) * 0.25\r\n se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)\r\n z = math.fabs(wt-mn) / se\r\n prob = 2*(1.0 -zprob(abs(z)))\r\n return wt, prob",
"def WilcoxonTest(list1, list2):\n\tp_value = 1.0\n\tassert(len(list1)==len(list2))\n\tdiffs = [b-a for (a,b) in zip(list1, list2)]\n\tranks = zip([math.fabs(d) for d in diffs], range(1,len(diffs)+1))\n\tranks.sort()\n\tW = 0.0\n\tfor i in range(len(diffs)):\n\t\tif diffs[i] > 0:\n\t\t\tW += ranks[i][1]\n\t#print \"#\", W\n\tn = len(list1)\n\tmean = n*(n+1)/4.0\n\tstdev = math.sqrt(n*(n+1)*(2*n+1)/6.0)\n\tif stdev > 0:\n\t\tZ = (W - mean)/stdev\n\telse:\n\t\tif Median(list1) < Median(list2):\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\tp_value = Prob_Z(Z)\n\treturn p_value",
"def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")",
"def mw_test(n1, n2):\r\n # find smaller sample, defined historically as n2. modify the names so we\r\n # don't risk modifying data outside the scope of the function.\r\n if len(n2) > len(n1):\r\n sn1, sn2 = array(n2), array(n1)\r\n else:\r\n sn1, sn2 = array(n1), array(n2)\r\n # sum the ranks of s2 by using the searchsorted magic. the logic is that we\r\n # use a sorted copy of the data from both groups (n1 and n2) to figure out\r\n # at what index we would insert the values from sample 2. by assessing the\r\n # difference between the index that value x would be inserted in if we were\r\n # doing left insertion versus right insertion, we can tell how many values\r\n # are tied with x. this allows us to calculate the average ranks easily.\r\n data = sorted(hstack([sn1, sn2]))\r\n ssl = searchsorted(data, sn2, 'left')\r\n ssr = searchsorted(data, sn2, 'right')\r\n sum_sn2_ranks = ((ssl + ssr + 1) / 2.).sum()\r\n ln1, ln2 = sn1.size, sn2.size\r\n C = (ln1 * ln2) + (ln2 * (ln2 + 1) / 2.) - sum_sn2_ranks\r\n U = max(C, ln1 * ln2 - C)\r\n # now we calculate the pvalue using the normal approximation and the two\r\n # tailed test. our formula corrects for ties, because in the case where\r\n # there are no ties, the forumla on the bottom of pg 429=the formula on the\r\n # bottom of pg 430.\r\n numerator = (U - ln1 * ln2 / 2.)\r\n # follwing three lines give the T value in the formula on page 430. same\r\n # logic as above; we calculate the left and right indices of the unique\r\n # values for all combined data from both samples, then calculate ti**3-ti\r\n # for each value.\r\n ux = unique(data)\r\n uxl = searchsorted(data, ux, 'left')\r\n uxr = searchsorted(data, ux, 'right')\r\n T = _corr_kw(uxr - uxl).sum()\r\n denominator = sqrt(((ln1 * ln2) / float((ln1 + ln2) * (ln1 + ln2 - 1))) * (((ln1 + ln2) ** 3\r\n - (ln1 + ln2) - T) / 12.))\r\n if denominator == 0:\r\n # Warning: probability of U can't be calculated by mw_test\r\n # because all ranks of data were tied. Returning nan as pvalue.\r\n return U, nan\r\n else:\r\n pval = zprob(numerator / float(denominator))\r\n return U, pval",
"def test_ppt_distinguishability_werner_hiding_pairs():\n dim = 2\n sigma_0 = (np.kron(np.identity(dim), np.identity(dim)) + swap_operator(dim)) / (dim * (dim + 1))\n sigma_1 = (np.kron(np.identity(dim), np.identity(dim)) - swap_operator(dim)) / (dim * (dim - 1))\n\n states = [sigma_0, sigma_1]\n\n expected_val = 1 / 2 + 1 / (dim + 1)\n\n primal_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=True)\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"min-error\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, expected_val, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, expected_val, atol=0.001), True)\n\n primal_res = ppt_distinguishability(\n states, probs=None, dist_method=\"unambiguous\", strategy=True\n )\n dual_res = ppt_distinguishability(states, probs=None, dist_method=\"unambiguous\", strategy=False)\n\n np.testing.assert_equal(np.isclose(primal_res, 1 / 3, atol=0.001), True)\n np.testing.assert_equal(np.isclose(dual_res, 1 / 3, atol=0.001), True)",
"def compare_samples(populations,parametric=False):\n from scipy.stats import mannwhitneyu, ttest_ind, f_oneway, kruskal, ranksums\n from statsmodels.stats.multicomp import pairwise_tukeyhsd\n populations = [np.array(pop) for pop in populations] #obscure line to take out missing values\n populations = [pop[~np.isnan(pop)] for pop in populations]\n\n if len(populations) == 2:\n if parametric:\n stat, p_value = ttest_ind(*populations)\n print(\"P-value t-test: {0:2.10f}\".format(p_value))\n else:\n stat, p_value1 = mannwhitneyu(*populations)\n print(\"P-value MWW: {0:2.10f}\".format(p_value))\n stat, p_value2 = ranksums(*populations)\n print(\"P-value Ranksum: {0:2.10f}\".format(p_value))\n \n if len(populations) > 2:\n if parametric:\n stat, p_value = f_oneway(*populations)\n print(\"P-value anova: {0:2.10f}\".format(p_value))\n else:\n stat, p_value = kruskal(*populations) \n print(\"P-value kruskal: {0:2.10f}\".format(p_value))\n \n if p_value < 0.05:\n flatten_pop = []\n label_pop = []\n for i,pop in enumerate(populations):\n flatten_pop += list(pop)\n label_pop += [\"pop{0}\".format(i)]*len(pop)\n \n res2 = pairwise_tukeyhsd(np.asarray(flatten_pop),label_pop)\n print(\"Printing pair comparisons using Tukey HSD\")\n print(res2)\n res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='grups')\n \n print((\"Means: \" + \", {}\"*len(populations)).format(*[np.mean(_) for _ in populations]))\n print((\"STDs: \" + \", {}\"*len(populations)).format(*[np.std(_) for _ in populations]))\n \n \n return p_value",
"def _mann_whitney(_sample_a, _sample_b):\n res = stats.mannwhitneyu(_sample_a, _sample_b, use_continuity=True)\n print('Mann-Whitney rank test\\nU-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def isStochasticallyDominated(wvalues1, wvalues2, probabilitiesForObjectives):\n not_equal = False\n for self_wvalue, other_wvalue, p in zip(wvalues1, wvalues2, probabilitiesForObjectives):\n r = random.random()\n if (r<=p):\n if self_wvalue > other_wvalue:\n return False\n elif self_wvalue < other_wvalue:\n not_equal = True\n return not_equal",
"def test_estimate_statistics_ppswor(self):\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n element_weight = math.log(FAILURE_PROBABILITY_INVERSE, math.e)\n s.process(\"a\", element_weight)\n sampling_probability = (FAILURE_PROBABILITY_INVERSE -\n 1) / FAILURE_PROBABILITY_INVERSE\n self.assertEqual(s.estimate_statistics(),\n element_weight / sampling_probability)",
"def _wilcoxon_holm(alpha=0.05, df_perf=None):\n # count the number of tested datasets per classifier\n df_counts = pd.DataFrame({'count': df_perf.groupby(\n ['classifier_name']).size()}).reset_index()\n # get the maximum number of tested datasets\n max_nb_datasets = df_counts['count'].max()\n # get the list of classifiers who have been tested on nb_max_datasets\n classifiers = list(df_counts.loc[df_counts['count'] == max_nb_datasets]\n ['classifier_name'])\n # test the null hypothesis using friedman before doing a post-hoc analysis\n friedman_p_value = friedmanchisquare(*(\n np.array(df_perf.loc[df_perf['classifier_name'] == c]['accuracy'])\n for c in classifiers))[1]\n print(friedman_p_value)\n\n # get the number of classifiers\n m = len(classifiers)\n # init array that contains the p-values calculated by the Wilcoxon signed rank test\n p_values = []\n # loop through the algorithms to compare pairwise\n for i in range(m - 1):\n # get the name of classifier one\n classifier_1 = classifiers[i]\n # get the performance of classifier one\n perf_1 = np.array(\n df_perf.loc[df_perf['classifier_name'] == classifier_1]['accuracy'], dtype=np.float64)\n for j in range(i + 1, m):\n # get the name of the second classifier\n classifier_2 = classifiers[j]\n # get the performance of classifier one\n perf_2 = np.array(df_perf.loc[df_perf['classifier_name'] == classifier_2]\n ['accuracy'], dtype=np.float64)\n # calculate the p_value\n p_value = wilcoxon(perf_1, perf_2, zero_method='pratt')[1]\n # appen to the list\n p_values.append((classifier_1, classifier_2, p_value, False))\n # get the number of hypothesis\n k = len(p_values)\n # sort the list in acsending manner of p-value\n p_values.sort(key=operator.itemgetter(2))\n\n # loop through the hypothesis\n for i in range(k):\n # correct alpha with holm\n new_alpha = float(alpha / (k - i))\n # test if significant after holm's correction of alpha\n if p_values[i][2] <= new_alpha:\n p_values[i] = (p_values[i][0], p_values[i]\n [1], p_values[i][2], True)\n else:\n # stop\n break\n # compute the average ranks to be returned (useful for drawing the cd diagram)\n # sort the dataframe of performances\n sorted_df_perf = df_perf.loc[df_perf['classifier_name'].isin(classifiers)]. \\\n sort_values(['classifier_name', 'dataset_name'])\n # get the rank data\n rank_data = np.array(sorted_df_perf['accuracy']).reshape(\n m, max_nb_datasets)\n\n # create the data frame containg the accuracies\n df_ranks = pd.DataFrame(data=rank_data, index=np.sort(\n classifiers), columns=np.unique(sorted_df_perf['dataset_name']))\n\n # average the ranks\n average_ranks = df_ranks.rank(ascending=False).mean(\n axis=1).sort_values(ascending=False)\n # return the p-values and the average ranks\n return p_values, average_ranks, max_nb_datasets",
"def lpaired(x,y):\r\n samples = ''\r\n while samples not in ['i','r','I','R','c','C']:\r\n print '\\nIndependent or related samples, or correlation (i,r,c): ',\r\n samples = raw_input()\r\n\r\n if samples in ['i','I','r','R']:\r\n print '\\nComparing variances ...',\r\n# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112\r\n r = obrientransform(x,y)\r\n f,p = F_oneway(pstats.colex(r,0),pstats.colex(r,1))\r\n if p<0.05:\r\n vartype='unequal, p='+str(round(p,4))\r\n else:\r\n vartype='equal'\r\n print vartype\r\n if samples in ['i','I']:\r\n if vartype[0]=='e':\r\n t,p = ttest_ind(x,y,0)\r\n print '\\nIndependent samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n if len(x)>20 or len(y)>20:\r\n z,p = ranksums(x,y)\r\n print '\\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)\r\n else:\r\n u,p = mannwhitneyu(x,y)\r\n print '\\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)\r\n\r\n else: # RELATED SAMPLES\r\n if vartype[0]=='e':\r\n t,p = ttest_rel(x,y,0)\r\n print '\\nRelated samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n t,p = ranksums(x,y)\r\n print '\\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)\r\n else: # CORRELATION ANALYSIS\r\n corrtype = ''\r\n while corrtype not in ['c','C','r','R','d','D']:\r\n print '\\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',\r\n corrtype = raw_input()\r\n if corrtype in ['c','C']:\r\n m,b,r,p,see = linregress(x,y)\r\n print '\\nLinear regression for continuous variables ...'\r\n lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]\r\n pstats.printcc(lol)\r\n elif corrtype in ['r','R']:\r\n r,p = spearmanr(x,y)\r\n print '\\nCorrelation for ranked variables ...'\r\n print \"Spearman's r: \",round(r,4),round(p,4)\r\n else: # DICHOTOMOUS\r\n r,p = pointbiserialr(x,y)\r\n print '\\nAssuming x contains a dichotomous variable ...'\r\n print 'Point Biserial r: ',round(r,4),round(p,4)\r\n print '\\n\\n'\r\n return None",
"def awilcoxont(x,y):\r\n if len(x) <> len(y):\r\n raise ValueError, 'Unequal N in awilcoxont. Aborting.'\r\n d = x-y\r\n d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences\r\n count = len(d)\r\n absd = abs(d)\r\n absranked = arankdata(absd)\r\n r_plus = 0.0\r\n r_minus = 0.0\r\n for i in range(len(absd)):\r\n if d[i] < 0:\r\n r_minus = r_minus + absranked[i]\r\n else:\r\n r_plus = r_plus + absranked[i]\r\n wt = min(r_plus, r_minus)\r\n mn = count * (count+1) * 0.25\r\n se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)\r\n z = math.fabs(wt-mn) / se\r\n z = math.fabs(wt-mn) / se\r\n prob = 2*(1.0 -zprob(abs(z)))\r\n return wt, prob",
"def wilcoxon_test(result, reference):\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing Wilcoxon test\\n')\n \n s, p_value = scipy.stats.wilcoxon(result, reference)\n \n print('Wilcoxon test completed successfully!\\n')\n \n print('Sum of rank differences: {} // p value: {}'.format(s, p_value))\n \n return s, p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. Please reformat them to be 1D')",
"def top_is_pointwise(self):\n return False",
"def KolmogorovSmirnoff_statistics(dd1, dd2):\n cum1 = dd1.cumulative_distribution()\n cum2 = dd2.cumulative_distribution()\n minimum = max(cum1[0][0], cum2[0][0])\n maximum = max(cum1[-1][0], cum2[-1][0])\n index1 = len(cum1) - 1\n index2 = len(cum2) - 1\n summa1 = summa2 = 0\n\n difference = 0\n for i in reversed(range(minimum, maximum+1)):\n if cum1[index1][0] == i:\n summa1 = cum1[index1][1]\n index1 -= 1\n if cum2[index2][0] == i:\n summa2 = cum2[index2][1]\n index2 -= 1\n if abs(summa1 - summa2) > difference:\n difference = abs(summa1 - summa2)\n return difference",
"def test_samples_close_to_inclusion_probability_ppswor(self):\n # The range we allow around 0.5n\n distance_from_half = 0.01\n # The number of elements we use (computed using Chernoff bounds)\n n = int((6.0 / (distance_from_half**2)) *\n math.log(2 * FAILURE_PROBABILITY_INVERSE, math.e) + 1)\n s = private_sampling.ThresholdSample(1.0,\n private_sampling.PpsworSamplingMethod)\n for i in range(n):\n s.process(i, math.log(2.0, math.e))\n self.assertGreaterEqual(len(s.elements), (0.5 - distance_from_half) * n)\n self.assertLessEqual(len(s.elements), (0.5 + distance_from_half) * n)",
"def perform_wilcoxon_validation(series1, series2):\n differences, sorted_diffs = ExperimentUtil._calculate_differences(series1, series2)\n sorted_diffs.sort()\n position_diffs = ExperimentUtil._calculate_position_differences(differences, sorted_diffs)\n\n for index, score in enumerate(differences):\n if score < 0:\n position_diffs[index] = position_diffs[index] * -1\n\n sum_positive, sum_negative = ExperimentUtil._calculate_positive_negative_sum(position_diffs)\n T = min(sum_positive, sum_negative)\n # TODO: Se o tamanho de n for maior que 30, seria preciso usar a tabela T-Student\n if len(position_diffs) <= 30:\n # TODO: Com o valor de T, precisamos ver qual o valor critico e elaborar melhor a resposta no relatorio\n return T < ExperimentUtil.wilcox_table[len(position_diffs)]",
"def atiecorrect(rankvals):\r\n sorted,posn = ashellsort(N.array(rankvals))\r\n n = len(sorted)\r\n T = 0.0\r\n i = 0\r\n while (i<n-1):\r\n if sorted[i] == sorted[i+1]:\r\n nties = 1\r\n while (i<n-1) and (sorted[i] == sorted[i+1]):\r\n nties = nties +1\r\n i = i +1\r\n T = T + nties**3 - nties\r\n i = i+1\r\n T = T / float(n**3-n)\r\n return 1.0 - T",
"def ranksums(data, dataLabel=None, paired=False, decimals=4):\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.permutation: data must be a dictionary with'\n + ' at exactly 2 keys' +\n '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n# labels = data.keys()\n g1 = data[k[0]]\n g2 = data[k[1]]\n # n1 = len(g1)\n # n2 = len(g2)\n\n if paired:\n (z, p) = Stats.wilcoxon(g1, g2)\n res = RStats.wilcox_test(g1, g2, pair=True)\n testtype = \"Wilcoxon signed-rank\"\n pairedtype = \"Paired\"\n else:\n (z, p) = Stats.ranksums(g1, g2)\n testtype = \"Rank-sums test\"\n res = RStats.wilcox_test(g1, g2, pair=False)\n pairedtype = \"Independent\"\n\n g1mean = np.mean(g1)\n g1std = np.std(g1, ddof=1)\n g2mean = np.mean(g2)\n g2std = np.std(g2, ddof=1)\n (w1, p1) = Stats.shapiro(g1) #, a=None, reta=False)\n (w2, p2) = Stats.shapiro(g2) #, a=None, reta=False)\n if dataLabel is not None:\n n = max([len(l) for l in k])\n print('\\n%s test, data set = %s' % (testtype, dataLabel))\n if p1 < 0.05 and p2 < 0.05:\n print(u' Both data sets appear normally distributed: \\n'\n + ' Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.\n format(p1, p2))\n else:\n print(u' ***At least one Data set is NOT normally distributed***\\n'\n + ' Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.\n format(p1, p2))\n print(u' (RankSums does not assume normal distribution)')\n \n print(u' {:s}={:8.{pc}f}\\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[0].rjust(n), g1mean, g1std, len(g1), pc=decimals))\n print(u' {:s}={:8.{pc}f}\\u00B1{:.{pc}f}, {:d} (mean, SD, N)'.\n format(k[1].rjust(n), g2mean, g2std, len(g2), pc=decimals))\n summarizeData(data, decimals=decimals)\n # iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n # iqr2 = np.subtract(*np.percentile(g2, [75, 25]))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.\n # format(k[0].rjust(n), np.median(g1), iqr1))\n # print(u' {:s}: median={:8.4f} IQR={:8.4f}'.\n # format(k[1].rjust(n), np.median(g2), iqr2))\n print(u' z={:8.4f} p={:8.6f} <scipy.Stats: {:20s}, {:11s}>'.\n format(float(z), float(p), testtype, pairedtype))\n print(u' z={:8.4f} p={:8.6f} <R Stats : {:20s}, {:11s}>\\n'.\n format(res[res.names.index('statistic')][0],\n float(res[res.names.index('p.value')][0]),\n testtype, pairedtype))\n return(float(z), float(p))",
"def test_rank_centrality():\n for case in iter_testcases('pairwise'):\n n_items = case[\"n_items\"]\n data = case[\"data\"]\n assert np.allclose(\n case[\"rc_est\"], rank_centrality(n_items, data),\n atol=ATOL, rtol=RTOL)",
"def test_two_player_zero_sum_pure_wellfare(strategies):\n game = gamegen.two_player_zero_sum_game(strategies)\n for prof in game.profiles():\n assert np.isclose(\n regret.pure_social_welfare(game, prof), 0\n ), \"zero sum profile wasn't zero sum\"",
"def tie_correction(sx):\r\n ux = unique(sx)\r\n uxl = searchsorted(sx, ux, 'left')\r\n uxr = searchsorted(sx, ux, 'right')\r\n return 1. - _corr_kw(uxr - uxl).sum() / float(_corr_kw(len(sx)))",
"def wilcoxon_w(pairs: List[Tuple[Union[float, int], int]]) -> float:\n i = 0\n w = 0.0\n while i < len(pairs):\n absi = pairs[i][0]\n sum_sgn = pairs[i][1]\n j = i + 1\n while j < len(pairs) and pairs[j][0] == absi:\n sum_sgn += pairs[j][1]\n j += 1\n r = 1 + 0.5 * (i + j - 1) # smoothed rank\n w += r * sum_sgn\n i = j\n return w",
"def _wk_test(self, alternative='two-sided', alpha=0.01):\n\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n\n u_wk, p_wk = mannwhitneyu(q0, q1, alternative=alternative)\n stats_wk = ranksums(q0, q1)[0]\n\n if p_wk <= alpha:\n h = 1\n else:\n h = 0\n\n stats_wk = {'zval': stats_wk, 'pval': p_wk}\n\n return h, stats_wk",
"def test_two_player_zero_sum_mixture_wellfare(strategies):\n game = gamegen.two_player_zero_sum_game(strategies)\n for prof in game.random_mixtures(20):\n assert np.isclose(\n regret.mixed_social_welfare(game, prof), 0\n ), \"zero sum profile wasn't zero sum\"",
"def compare(predictions, truth):\n comp = predictions - truth\n return 1 - (np.count_nonzero(comp) / len(predictions))",
"def test_spearman_ranked(self):\r\n exp = 0.2969697\r\n obs = spearman(self.b_ranked, self.c_ranked)\r\n self.assertFloatEqual(obs, exp)",
"def test_no_duplicates_and_positives_in_negative_sample(self):\n model = PoincareModel(self.data_large, negative=3)\n positive_nodes = model.node_relations[0] # Positive nodes for node 0\n num_samples = 100 # Repeat experiment multiple times\n for i in range(num_samples):\n negatives = model._sample_negatives(0)\n self.assertFalse(positive_nodes & set(negatives))\n self.assertEqual(len(negatives), len(set(negatives)))",
"def compute_sign(k1, k2):\n\n def ordering_sign(permu, weights):\n \"\"\"Returns the exponent of the Koszul sign of the given\n permutation acting on the elements of degrees given by the\n list of weights\n\n \"\"\"\n sign_exp = 0\n for idx, j in enumerate(permu):\n to_add = [weights[permu.index(i)] for\n i in permu[idx + 1:] if i < j]\n sign_exp += weights[idx] * sum(to_add)\n return sign_exp % 2\n\n def action_sign(ordered_k1, ordered_weights):\n \"\"\"Given a ordered tuple [1,..,1, 2,...,2, ..., r,...,r]\n and weights [w_1, w_2, ..., w_{r+d}] of the same length, gives\n the koszul sign obtained by inserting from the left a weight 1\n operator between equal consecutive elements.\n\n \"\"\"\n sign_exp = 0\n for idx, (i, j) in enumerate(pairwise(ordered_k1)):\n if i == j:\n sign_exp += sum(ordered_weights[:idx + 1])\n return sign_exp % 2\n\n sign_exp = 0\n weights = [e.dimension % 2 for e in k2]\n inv_ordering_permu = [pair[0] for pair in\n sorted(enumerate(k1), key=itemgetter(1))]\n ordering_permu = tuple(inv_ordering_permu.index(i)\n for i in range(len(inv_ordering_permu)))\n sign_exp += ordering_sign(ordering_permu, weights)\n ordered_k1 = list(sorted(k1))\n ordered_weights = [weights[i] for i in inv_ordering_permu]\n sign_exp += action_sign(ordered_k1, ordered_weights)\n return (-1) ** sign_exp"
]
| [
"0.7029563",
"0.6060772",
"0.60201573",
"0.585339",
"0.58216035",
"0.5812656",
"0.5804695",
"0.5777771",
"0.5729734",
"0.57100165",
"0.5675171",
"0.5665772",
"0.5662058",
"0.5558153",
"0.5539113",
"0.55338377",
"0.55330694",
"0.55140287",
"0.54924697",
"0.547573",
"0.54596657",
"0.5452372",
"0.5445089",
"0.5438541",
"0.54338133",
"0.54327345",
"0.53871226",
"0.5380199",
"0.53691226",
"0.5365123"
]
| 0.6156549 | 1 |
Calculate the Mann-Whitney U rank test on samples X and Y. It tests the null hypothesis that the two samples come from the same distribution, often summarized as testing whether they have the same median. | def mann_whitney_u_test(X, Y):
m, n = len(X), len(Y)
U = 0
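    # U counts the number of pairs (x, y) with x < y; ties contribute nothing here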
for x in X:
for y in Y:
if x < y:
U += 1
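    # normal approximation under H0: E[U] = mn/2, Var[U] = mn(m+n+1)/12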
E_u = m * n / 2
var_u = m * n * (m + n + 1) / 12
z = (U - E_u) / np.sqrt(var_u)
p_value = 2. * norm.sf(abs(z)) # two sided test
return z, p_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _mann_whitney(_sample_a, _sample_b):\n res = stats.mannwhitneyu(_sample_a, _sample_b, use_continuity=True)\n print('Mann-Whitney rank test\\nU-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def amannwhitneyu(x,y):\r\n n1 = len(x)\r\n n2 = len(y)\r\n ranked = rankdata(N.concatenate((x,y)))\r\n rankx = ranked[0:n1] # get the x-ranks\r\n ranky = ranked[n1:] # the rest are y-ranks\r\n u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x\r\n u2 = n1*n2 - u1 # remainder is U for y\r\n bigu = max(u1,u2)\r\n smallu = min(u1,u2)\r\n T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores\r\n if T == 0:\r\n raise ValueError, 'All numbers are identical in amannwhitneyu'\r\n sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)\r\n z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc\r\n return smallu, 1.0 - azprob(z)",
"def lmannwhitneyu(x,y):\r\n n1 = len(x)\r\n n2 = len(y)\r\n ranked = rankdata(x+y)\r\n rankx = ranked[0:n1] # get the x-ranks\r\n ranky = ranked[n1:] # the rest are y-ranks\r\n u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x\r\n u2 = n1*n2 - u1 # remainder is U for y\r\n bigu = max(u1,u2)\r\n smallu = min(u1,u2)\r\n T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores\r\n if T == 0:\r\n raise ValueError, 'All numbers are identical in lmannwhitneyu'\r\n sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)\r\n z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc\r\n return smallu, 1.0 - zprob(z)",
"def _wilcoxon(_sample_a, _sample_b):\n res = stats.ranksums(_sample_a, _sample_b)\n print('Wilcoxon rank-sum\\nstatistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)",
"def mw_test(n1, n2):\r\n # find smaller sample, defined historically as n2. modify the names so we\r\n # don't risk modifying data outside the scope of the function.\r\n if len(n2) > len(n1):\r\n sn1, sn2 = array(n2), array(n1)\r\n else:\r\n sn1, sn2 = array(n1), array(n2)\r\n # sum the ranks of s2 by using the searchsorted magic. the logic is that we\r\n # use a sorted copy of the data from both groups (n1 and n2) to figure out\r\n # at what index we would insert the values from sample 2. by assessing the\r\n # difference between the index that value x would be inserted in if we were\r\n # doing left insertion versus right insertion, we can tell how many values\r\n # are tied with x. this allows us to calculate the average ranks easily.\r\n data = sorted(hstack([sn1, sn2]))\r\n ssl = searchsorted(data, sn2, 'left')\r\n ssr = searchsorted(data, sn2, 'right')\r\n sum_sn2_ranks = ((ssl + ssr + 1) / 2.).sum()\r\n ln1, ln2 = sn1.size, sn2.size\r\n C = (ln1 * ln2) + (ln2 * (ln2 + 1) / 2.) - sum_sn2_ranks\r\n U = max(C, ln1 * ln2 - C)\r\n # now we calculate the pvalue using the normal approximation and the two\r\n # tailed test. our formula corrects for ties, because in the case where\r\n # there are no ties, the forumla on the bottom of pg 429=the formula on the\r\n # bottom of pg 430.\r\n numerator = (U - ln1 * ln2 / 2.)\r\n # follwing three lines give the T value in the formula on page 430. same\r\n # logic as above; we calculate the left and right indices of the unique\r\n # values for all combined data from both samples, then calculate ti**3-ti\r\n # for each value.\r\n ux = unique(data)\r\n uxl = searchsorted(data, ux, 'left')\r\n uxr = searchsorted(data, ux, 'right')\r\n T = _corr_kw(uxr - uxl).sum()\r\n denominator = sqrt(((ln1 * ln2) / float((ln1 + ln2) * (ln1 + ln2 - 1))) * (((ln1 + ln2) ** 3\r\n - (ln1 + ln2) - T) / 12.))\r\n if denominator == 0:\r\n # Warning: probability of U can't be calculated by mw_test\r\n # because all ranks of data were tied. Returning nan as pvalue.\r\n return U, nan\r\n else:\r\n pval = zprob(numerator / float(denominator))\r\n return U, pval",
"def test__repeated_median(repeated_median):\n x, y, *_ = repeated_median\n assert repeated_median_slope(x, y) == 5.0",
"def mann_whitney_plus_means(turnstile_weather):\n with_rain = turnstile_weather[turnstile_weather.rain == 1]\n without_rain = turnstile_weather[turnstile_weather.rain == 0]\n\n with_rain_mean = with_rain['ENTRIESn_hourly'].mean()\n without_rain_mean = without_rain['ENTRIESn_hourly'].mean()\n U, p = scipy.stats.mannwhitneyu(with_rain['ENTRIESn_hourly'], without_rain['ENTRIESn_hourly'])\n\n return with_rain_mean, without_rain_mean, U, p",
"def mannwhitneyu(sample_0, sample_1, one_sided=False):\n res = stats.mannwhitneyu(sample_0, sample_1, alternative=\"two-sided\" if not one_sided else \"less\")\n return res.statistic, res.pvalue",
"def checkMedian(nums1, nums2, x=None, x_dash=None,y=None,y_dash=None,median=False):\n # odd array\n if (len(nums1) + len(nums2)) % 2 == 1:\n if x != None and x_dash != None and y != None and y_dash != None:\n if nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n median = nums1[x]\n return median\n elif nums1[x] <= nums2[y] and nums1[x_dash] >= nums2[y]:\n median = nums2[y]\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash == None:\n if nums1[x_dash] >= nums2[y]:\n median = nums2[y]\n return median\n else:\n return False\n elif x != None and x_dash == None and y != None and y_dash != None:\n if nums2[y] >= nums1[x]:\n median = nums2[y]\n return median\n elif nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n\n median = nums1[x]\n return median\n else:\n return False\n # even array\n else:\n if x != None and x_dash != None and y != None and y_dash != None:\n if nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n if nums1[x_dash] <= nums2[y_dash]:\n median = (nums1[x] + nums1[x_dash])/2\n elif nums1[x_dash] > nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n elif nums1[x] <= nums2[y] and nums1[x_dash] >= nums2[y]:\n if nums2[y_dash] <= nums1[x_dash]:\n median = (nums2[y] + nums2[y_dash])/2\n elif nums2[y_dash] > nums1[x_dash]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash == None:\n if nums1[x_dash] >= nums2[y]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n else:\n return False\n elif x != None and x_dash == None and y != None and y_dash != None:\n if nums2[y] >= nums1[x]:\n median = (nums2[y] + nums2[y_dash])/2\n return median\n elif nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n else:\n return False\n elif x != None and x_dash == None and y == None and y_dash != None:\n if nums1[x] <= nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash != None:\n if nums2[y] <= nums1[x_dash]:\n if nums1[x_dash]<= nums2[y_dash]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n elif nums1[x_dash] > nums2[y_dash]:\n median = (nums2[y] + nums2[y_dash])/2\n return median\n else:\n return False",
"def test_basic_seasonal_median2D(self):\n self.testInst.bounds = self.bounds1\n vars = ['dummy1', 'dummy2', 'dummy3']\n results = avg.median2D(self.testInst, self.long_bins, 'longitude',\n self.mlt_bins, 'mlt', vars, returnData=True,\n auto_bin=self.auto_bin)\n\n # Iterate over all y rows. Value should be equal to integer value of\n # mlt. No variation in the median, all values should be the same.\n for i, y in enumerate(results['dummy1']['bin_y'][:-1]):\n assert np.all(results['dummy1']['median'][i, :] == y.astype(int))\n assert np.all(results['dummy1']['avg_abs_dev'][i, :] == 0)\n\n # Iterate over x rows. Value should be the longitude / 15.\n for i, x in enumerate(results['dummy1']['bin_x'][:-1]):\n assert np.all(results['dummy2']['median'][:, i] == x / 15.0)\n assert np.all(results['dummy2']['avg_abs_dev'][:, i] == 0)\n\n # Iterate over x rows. Value should be the longitude / 15 * 1000.\n for i, x in enumerate(results['dummy1']['bin_x'][:-1]):\n assert np.all(results['dummy3']['median'][:, i] == x / 15.0 * 1000.0\n + results['dummy1']['bin_y'][:-1])\n assert np.all(results['dummy3']['avg_abs_dev'][:, i] == 0)\n\n # Holds here because there are 32 days, no data is discarded,\n # and each day holds same amount of data.\n assert np.all(self.testInst.data['dummy1'].size * 3\n == sum([sum(i) for i in results['dummy1']['count']]))\n\n # Ensure all outputs are numpy arrays\n for var in vars:\n assert isinstance(results[var]['median'], type(np.array([])))\n\n # Ensure binned data returned\n for var in vars:\n assert 'data' in results[var].keys()\n\n return",
"def testMedian(self):\n stats = afwMath.makeStatistics(self.image, afwMath.MEDIAN)\n\n self.assertEqual(stats.getValue(afwMath.MEDIAN), self.val)\n\n values = [1.0, 2.0, 3.0, 2.0 ]\n self.assertEqual(afwMath.makeStatistics(values, afwMath.MEDIAN).getValue(), 2.0)",
"def _wilcoxon_holm(alpha=0.05, df_perf=None):\n # count the number of tested datasets per classifier\n df_counts = pd.DataFrame({'count': df_perf.groupby(\n ['classifier_name']).size()}).reset_index()\n # get the maximum number of tested datasets\n max_nb_datasets = df_counts['count'].max()\n # get the list of classifiers who have been tested on nb_max_datasets\n classifiers = list(df_counts.loc[df_counts['count'] == max_nb_datasets]\n ['classifier_name'])\n # test the null hypothesis using friedman before doing a post-hoc analysis\n friedman_p_value = friedmanchisquare(*(\n np.array(df_perf.loc[df_perf['classifier_name'] == c]['accuracy'])\n for c in classifiers))[1]\n print(friedman_p_value)\n\n # get the number of classifiers\n m = len(classifiers)\n # init array that contains the p-values calculated by the Wilcoxon signed rank test\n p_values = []\n # loop through the algorithms to compare pairwise\n for i in range(m - 1):\n # get the name of classifier one\n classifier_1 = classifiers[i]\n # get the performance of classifier one\n perf_1 = np.array(\n df_perf.loc[df_perf['classifier_name'] == classifier_1]['accuracy'], dtype=np.float64)\n for j in range(i + 1, m):\n # get the name of the second classifier\n classifier_2 = classifiers[j]\n # get the performance of classifier one\n perf_2 = np.array(df_perf.loc[df_perf['classifier_name'] == classifier_2]\n ['accuracy'], dtype=np.float64)\n # calculate the p_value\n p_value = wilcoxon(perf_1, perf_2, zero_method='pratt')[1]\n # appen to the list\n p_values.append((classifier_1, classifier_2, p_value, False))\n # get the number of hypothesis\n k = len(p_values)\n # sort the list in acsending manner of p-value\n p_values.sort(key=operator.itemgetter(2))\n\n # loop through the hypothesis\n for i in range(k):\n # correct alpha with holm\n new_alpha = float(alpha / (k - i))\n # test if significant after holm's correction of alpha\n if p_values[i][2] <= new_alpha:\n p_values[i] = (p_values[i][0], p_values[i]\n [1], p_values[i][2], True)\n else:\n # stop\n break\n # compute the average ranks to be returned (useful for drawing the cd diagram)\n # sort the dataframe of performances\n sorted_df_perf = df_perf.loc[df_perf['classifier_name'].isin(classifiers)]. \\\n sort_values(['classifier_name', 'dataset_name'])\n # get the rank data\n rank_data = np.array(sorted_df_perf['accuracy']).reshape(\n m, max_nb_datasets)\n\n # create the data frame containg the accuracies\n df_ranks = pd.DataFrame(data=rank_data, index=np.sort(\n classifiers), columns=np.unique(sorted_df_perf['dataset_name']))\n\n # average the ranks\n average_ranks = df_ranks.rank(ascending=False).mean(\n axis=1).sort_values(ascending=False)\n # return the p-values and the average ranks\n return p_values, average_ranks, max_nb_datasets",
"def median_absolute_error(y_true, y_pred, *, multioutput=..., sample_weight=...):\n ...",
"def _wk_test(self, alternative='two-sided', alpha=0.01):\n\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n\n u_wk, p_wk = mannwhitneyu(q0, q1, alternative=alternative)\n stats_wk = ranksums(q0, q1)[0]\n\n if p_wk <= alpha:\n h = 1\n else:\n h = 0\n\n stats_wk = {'zval': stats_wk, 'pval': p_wk}\n\n return h, stats_wk",
"def test_output(self, arr, num_batches, expected, interface):\n arr = convert_to_interface(arr, interface)\n\n actual = median_of_means(arr, num_batches)\n assert actual.shape == ()\n assert np.allclose(actual, expected)",
"def evaluate(self, X_test, Y_test):\n \n test_data = zip(X_test, Y_test)\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n \n# Updated for the testing\n# ========================\n return (sum(int(x == y) for (x, y) in test_results) / 100)",
"def test_basic_seasonal_median2D(self):\n results = avg.median2D(self.testInst, [0., 360., 24], 'longitude',\n [0., 24., 24], 'mlt', [self.dname])\n\n # Values in xarray instrument depend upon longitude and mlt location.\n for i, row in enumerate(results[self.dname]['median']):\n mlt_val = i\n for j, item in enumerate(row):\n long_val = j * 1000.\n test_vals = mlt_val + long_val\n assert np.all(item[self.dname].values == test_vals)\n\n # No variation in the median, all values should be the same.\n for i, row in enumerate(results[self.dname]['avg_abs_dev']):\n for j, item in enumerate(row):\n assert np.all(item[self.dname].values == 0)\n\n return",
"def inner_median(x, y):\n intersection = np.intersect1d(x, y)\n return np.median(intersection)",
"def WilcoxonTest(list1, list2):\n\tp_value = 1.0\n\tassert(len(list1)==len(list2))\n\tdiffs = [b-a for (a,b) in zip(list1, list2)]\n\tranks = zip([math.fabs(d) for d in diffs], range(1,len(diffs)+1))\n\tranks.sort()\n\tW = 0.0\n\tfor i in range(len(diffs)):\n\t\tif diffs[i] > 0:\n\t\t\tW += ranks[i][1]\n\t#print \"#\", W\n\tn = len(list1)\n\tmean = n*(n+1)/4.0\n\tstdev = math.sqrt(n*(n+1)*(2*n+1)/6.0)\n\tif stdev > 0:\n\t\tZ = (W - mean)/stdev\n\telse:\n\t\tif Median(list1) < Median(list2):\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\tp_value = Prob_Z(Z)\n\treturn p_value",
"def evaluate(self, X, y):\n correct = np.zeros((len(y),))\n for i in range(0,len(y)):\n r = P.forward(X[i,:])\n if r == y[i]:\n correct[i] = 1\n \n return np.mean(correct)*100",
"def test_basic_seasonal_median2D(self):\n results = avg.median2D(self.testInst, [0., 360., 24], 'longitude',\n [0., 24., 24], 'mlt', [self.dname])\n\n # Test medians.\n for i, row in enumerate(results[self.dname]['median']):\n for j, item in enumerate(row):\n assert np.all(item[self.dname] == self.test_vals)\n\n # No variation in the median, all values should be the same.\n for i, row in enumerate(results[self.dname]['avg_abs_dev']):\n for j, item in enumerate(row):\n assert np.all(item[self.dname] == 0)\n\n return",
"def test_basic_seasonal_2Dmedian(self):\n\n results = avg.median2D(self.testInst, [0., 360., 24], 'longitude',\n [0., 24., 24], 'mlt', [self.dname])\n\n # Test medians.\n for i, row in enumerate(results[self.dname]['median']):\n for j, item in enumerate(row):\n assert np.all(item['density'] == self.test_vals)\n assert np.all(item['fraction'] == self.test_fracs)\n\n # No variation in the median, all values should be the same\n for i, row in enumerate(results[self.dname]['avg_abs_dev']):\n for j, item in enumerate(row):\n assert np.all(item['density'] == 0)\n assert np.all(item['fraction'] == 0)\n\n return",
"def test_constellation_median2D(self):\n\n vars = ['dummy1', 'dummy2', 'dummy3']\n\n resultsC = avg.median2D(self.testC, [0., 360., 24], 'longitude',\n [0., 24., 24], 'mlt', vars)\n resultsI = avg.median2D(self.testI, [0., 360., 24], 'longitude',\n [0., 24., 24], 'mlt', vars)\n\n output_labels = ['median', 'avg_abs_dev']\n for var in vars:\n for output in output_labels:\n assert np.array_equal(resultsC[var][output],\n resultsI[var][output])\n\n return",
"def test_basic_seasonal_median1D(self):\n self.testInst.bounds = self.bounds1\n vars = ['dummy1', 'dummy2', 'dummy3']\n results = avg.median1D(self.testInst, self.long_bins, 'longitude',\n vars, returnData=True, auto_bin=self.auto_bin)\n\n # Iterate over x rows. Value should be the longitude / 15.\n for i, x in enumerate(results['dummy1']['bin_x'][:-1]):\n assert np.all(results['dummy2']['median'][i] == x / 15.0)\n assert np.all(results['dummy2']['avg_abs_dev'][i] == 0)\n\n # Iterate over x rows. Value should be the longitude / 15 * 1000.\n # except for the variation in value with 'mlt'.\n for i, x in enumerate(results['dummy1']['bin_x'][:-1]):\n assert np.all(results['dummy3']['median'][i] // 100 * 100 == x\n / 15.0 * 1000.0)\n assert np.all(results['dummy3']['avg_abs_dev'][i] > 0)\n\n # Holds here because there are 32 days, no data is discarded,\n # and each day holds same amount of data.\n assert np.all(self.testInst.data['dummy1'].size * 3\n == sum(results['dummy1']['count']))\n\n # Ensure all outputs are numpy arrays\n for var in vars:\n assert isinstance(results[var]['median'], type(np.array([])))\n\n # Ensure binned data returned\n for var in vars:\n assert 'data' in results[var].keys()\n\n return",
"def test_basic_seasonal_median1D(self):\n results = avg.median1D(self.testInst, [0., 24., 24], 'mlt',\n [self.dname])\n\n for i, row in enumerate(results[self.dname]['median']):\n # Define truth values. There is a variation in value based on\n # longitude, at thousands level. MLT only shows at ones/tens level.\n test_vals = [i] * self.test_val_length\n vals = []\n for val in row[self.dname].values:\n if not isinstance(val, np.float64):\n # Provide support for testing higher order data sources.\n val = val[0]\n vals.append(int(str(int(val))[-2:]))\n assert np.all(vals == test_vals)\n\n # There is a variation in binned value based upon longitude.\n for i, row in enumerate(results[self.dname]['avg_abs_dev']):\n assert np.all(row[self.dname] >= 0)\n\n return",
"def test__repeated_median_catch_division_by_zero(repeated_median):\n *_, divzero_x, divzero_y = repeated_median\n assert repeated_median_slope(divzero_x, divzero_y) == 1.0",
"def _weighted_spearman(y, y_pred, w=None):\n # idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask\n # y = np.compress(idx, np.array(y))\n # y_pred = np.compress(idx, np.array(y_pred))\n # w = np.compress(idx, w)\n y_pred_ranked = np.apply_along_axis(rankdata, 0, y_pred)\n y_ranked = np.apply_along_axis(rankdata, 0, y)\n return _weighted_pearson(y_pred_ranked, y_ranked, w, pearson=False)",
"def test_median_type():\n\tmedian(.2)",
"def ninetieth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.90)]/60",
"def rank_by_average_normalized_score(benchmarks_unique_coverage_list):\n df_list = [df.set_index('fuzzer') for df in benchmarks_unique_coverage_list]\n combined_df = pd.concat(df_list, axis=1).astype(float).T\n scores = data_utils.experiment_rank_by_average_normalized_score(combined_df)\n return scores"
]
| [
"0.68291473",
"0.6501168",
"0.6212198",
"0.6049667",
"0.6025934",
"0.60197026",
"0.58614886",
"0.57373714",
"0.55942005",
"0.55923915",
"0.5583715",
"0.5572583",
"0.55200773",
"0.5506094",
"0.55004585",
"0.5487513",
"0.5483416",
"0.54309464",
"0.54106146",
"0.536388",
"0.5362077",
"0.5358153",
"0.53490794",
"0.5317811",
"0.53172696",
"0.5311951",
"0.5288804",
"0.5274568",
"0.52560335",
"0.5245242"
]
| 0.6677407 | 1 |
Calculate the Fligner-Policello test on samples X and Y. It tests whether they have the same median, but without assumptions on the shape or scale of the distributions. However, it assumes that X and Y are from two different symmetric distributions. | def fligner_policello_test(X, Y):
P_i = []
for x in X:
count = 0
for y in Y:
if y <= x:
count += 1
P_i.append(count)
Q_j = []
for y in Y:
count = 0
for x in X:
if x <= y:
count += 1
Q_j.append(count)
P_i = np.array(P_i)
Q_j = np.array(Q_j)
P_bar = np.average(P_i)
Q_bar = np.average(Q_j)
V1 = sum((P_i - P_bar) ** 2)
V2 = sum((Q_j - Q_bar) ** 2)
z = (sum(Q_j) - sum(P_i)) / (2 * np.sqrt(V1 + V2 + P_bar * Q_bar))
p_value = 2. * norm.sf(abs(z)) # two sided test
return z, p_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test(X, Y, perms=10000, method='pearson', tail='two-tail'):\n\n # Make sure that X and Y are formatted as Numpy arrays.\n X, Y = np.asarray(X, dtype=float), np.asarray(Y, dtype=float)\n\n # Check that X and Y are valid distance matrices.\n if spatial.distance.is_valid_dm(X) == False and spatial.distance.is_valid_y(X) == False:\n raise ValueError('X is not a valid distance matrix')\n if spatial.distance.is_valid_dm(Y) == False and spatial.distance.is_valid_y(Y) == False:\n raise ValueError('Y is not a valid distance matrix')\n\n # If X or Y is a redundant distance matrix, reduce it to a condensed distance matrix.\n if len(X.shape) == 2:\n X = spatial.distance.squareform(X, force='tovector', checks=False)\n if len(Y.shape) == 2:\n Y = spatial.distance.squareform(Y, force='tovector', checks=False)\n\n # Check for size equality.\n if X.shape[0] != Y.shape[0]:\n raise ValueError('X and Y are not of equal size')\n\n # Check for minimum size.\n if X.shape[0] < 3:\n raise ValueError('X and Y should represent at least 3 objects')\n\n # If Spearman correlation is requested, convert X and Y to ranks.\n if method == 'spearman':\n X, Y = stats.rankdata(X), stats.rankdata(Y)\n #print(X)\n #print(Y)\n # Check for valid method.\n elif method != 'pearson':\n raise ValueError('The method should be either \"pearson\" or \"spearman\"')\n\n # Check for valid tail parameter.\n if tail != 'upper' and tail != 'lower' and tail != 'two-tail':\n raise ValueError('The tail should be set to \"upper\", \"lower\", or \"two-tail\"')\n\n\n # Calculate the X and Y residuals, which will be used to compute the covariance under each permutation.\n X_residuals, Y_residuals = X - X.mean(), Y - Y.mean()\n \n # Expand the Y residuals to a redundant matrix.\n Y_residuals_as_matrix = spatial.distance.squareform(Y_residuals, force='tomatrix', checks=False)\n\n # Get the number of objects.\n m = Y_residuals_as_matrix.shape[0]\n\n # Calculate the number of possible matrix permutations.\n n = np.math.factorial(m)\n\n # Initialize an empty array to store temporary permutations of Y_residuals.\n Y_residuals_permuted = np.zeros(Y_residuals.shape[0], dtype=float)\n\n # If the number of requested permutations is greater than the number of possible permutations (m!) or the perms parameter is set to 0, then run a\n # deterministic Mantel test ...\n if perms >= n or perms == 0:\n\n # Initialize an empty array to store the covariances.\n #print (\"!!!!!!!!!n= \",n)\n covariances = np.zeros(n, dtype=float)\n\n # Enumerate all permutations of row/column orders and iterate over them.\n for i, order in enumerate(permutations(range(m))):\n\n # Take a permutation of the matrix.\n Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]\n\n # Condense the permuted version of the matrix. 
\n spatial.distance._distance_wrap.to_vector_from_squareform_wrap(Y_residuals_as_matrix_permuted, Y_residuals_permuted)\n\n # Compute and store the covariance.\n covariances[i] = (X_residuals * Y_residuals_permuted).sum()\n\n else:\n\n # Initialize an empty array to store the covariances.\n covariances = np.zeros(perms, dtype=float)\n\n # Initialize an array to store the permutation order.\n order = np.arange(m)\n\n # Store the veridical covariance in 0th position...\n covariances[0] = (X_residuals * Y_residuals).sum()\n\n # ...and then run the random permutations.\n for i in range(1, perms):\n\n # Choose a random order in which to permute the rows and columns.\n np.random.shuffle(order)\n\n # Take a permutation of the matrix.\n Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]\n\n # Condense the permuted version of the matrix. \n spatial.distance._distance_wrap.to_vector_from_squareform_wrap(Y_residuals_as_matrix_permuted, Y_residuals_permuted)\n\n # Compute and store the covariance.\n covariances[i] = (X_residuals * Y_residuals_permuted).sum()\n\n # Calculate the veridical correlation coefficient from the veridical covariance.\n r = covariances[0] / np.sqrt((X_residuals ** 2).sum() * (Y_residuals ** 2).sum())\n\n # Calculate the empirical p-value for the upper or lower tail.\n if tail == 'upper':\n p = (covariances >= covariances[0]).sum() / float(covariances.shape[0])\n elif tail == 'lower':\n p = (covariances <= covariances[0]).sum() / float(covariances.shape[0])\n elif tail == 'two-tail':\n p = (abs(covariances) >= abs(covariances[0])).sum() / float(covariances.shape[0])\n\n # Calculate the standard score.\n z = (covariances[0] - covariances.mean()) / covariances.std()\n\n return r, p, z",
"def _scipy_fk_test(self, mode='median', alpha=0.01):\n q0 = self.get_group_data(0, self.df_test_resampled, ['Q'])\n q1 = self.get_group_data(1, self.df_test_resampled, ['Q'])\n\n with warnings.catch_warnings(): # supress scipy warnings\n warnings.filterwarnings('ignore')\n fstats, pval = fligner(q0, q1, center=mode)\n\n stats_fk = {'z': fstats, 'pval': pval}\n\n if stats_fk['pval'] <= alpha: # With CHI2 approximation\n h = 1\n else:\n h = 0\n\n return h, stats_fk",
"def kernel_two_sample_test(X, Y, permutations=10000):\n\n if X.shape[1] != Y.shape[1]:\n raise ValueError('X and y should have the same dimensionality.')\n\n n = X.shape[0]\n\n # Gaussian kernel\n def _gauss_kernel(d, h):\n return np.sum(np.exp(-d**2/h**2))\n\n def _compute_kernel_statistic(X, Y, h):\n n = X.shape[0]\n m = Y.shape[0]\n\n if h == 0.0:\n h = 1.0\n\n Exy = 2./(n*m)*_gauss_kernel(cdist(X, Y, 'euclidean'), h)\n\n # double these quanitites because pdist produces C(len(X), 2) entries\n # which accounts for half of the total sum for each term in the\n # statistic. We also need to control for the diagnoal; the Guassian\n # kernel will produce ones along the diagnoal.\n Exx = 2./(n*n)*(_gauss_kernel(pdist(X, 'euclidean'), h) + .5*n)\n Eyy = 2./(m*m)*(_gauss_kernel(pdist(Y, 'euclidean'), h) + .5*m)\n\n return Exx + Eyy - Exy\n\n # Pool the samples.\n S = np.vstack((X, Y))\n\n # Computer kernel width\n h = np.mean(pdist(S, 'euclidean'))\n\n # Compute the observed statistic.\n t_star = _compute_kernel_statistic(X, Y, h)\n Tj = np.zeros(permutations)\n T = t_star\n\n # Compute resampled test statistics.\n for j in range(permutations):\n np.random.shuffle(S)\n Xp, Yp = S[:n], S[n:]\n tj = _compute_kernel_statistic(Xp, Yp, h)\n Tj[j] = tj\n\n # Fraction of samples larger than observed t_star (with error correction).\n f = np.sum(Tj >= T)+1\n p = f/(permutations+1)\n return p",
"def lpaired(x,y):\r\n samples = ''\r\n while samples not in ['i','r','I','R','c','C']:\r\n print '\\nIndependent or related samples, or correlation (i,r,c): ',\r\n samples = raw_input()\r\n\r\n if samples in ['i','I','r','R']:\r\n print '\\nComparing variances ...',\r\n# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112\r\n r = obrientransform(x,y)\r\n f,p = F_oneway(pstats.colex(r,0),pstats.colex(r,1))\r\n if p<0.05:\r\n vartype='unequal, p='+str(round(p,4))\r\n else:\r\n vartype='equal'\r\n print vartype\r\n if samples in ['i','I']:\r\n if vartype[0]=='e':\r\n t,p = ttest_ind(x,y,0)\r\n print '\\nIndependent samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n if len(x)>20 or len(y)>20:\r\n z,p = ranksums(x,y)\r\n print '\\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)\r\n else:\r\n u,p = mannwhitneyu(x,y)\r\n print '\\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)\r\n\r\n else: # RELATED SAMPLES\r\n if vartype[0]=='e':\r\n t,p = ttest_rel(x,y,0)\r\n print '\\nRelated samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n t,p = ranksums(x,y)\r\n print '\\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)\r\n else: # CORRELATION ANALYSIS\r\n corrtype = ''\r\n while corrtype not in ['c','C','r','R','d','D']:\r\n print '\\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',\r\n corrtype = raw_input()\r\n if corrtype in ['c','C']:\r\n m,b,r,p,see = linregress(x,y)\r\n print '\\nLinear regression for continuous variables ...'\r\n lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]\r\n pstats.printcc(lol)\r\n elif corrtype in ['r','R']:\r\n r,p = spearmanr(x,y)\r\n print '\\nCorrelation for ranked variables ...'\r\n print \"Spearman's r: \",round(r,4),round(p,4)\r\n else: # DICHOTOMOUS\r\n r,p = pointbiserialr(x,y)\r\n print '\\nAssuming x contains a dichotomous variable ...'\r\n print 'Point Biserial r: ',round(r,4),round(p,4)\r\n print '\\n\\n'\r\n return None",
"def test__repeated_median(repeated_median):\n x, y, *_ = repeated_median\n assert repeated_median_slope(x, y) == 5.0",
"def L2_dists(x, y):\n #print(x.shape)\n #print(y.shape)\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2)[np.newaxis]\n dists += np.sum(y**2)\n return np.sqrt(dists)",
"def compute_dists(x, y):\r\n \r\n return (x - y.permute(0, 2, 1)) ** 2",
"def test_F(x, y, level):\n if len(x) < 2 or len(y) < 2:\n return True\n vx = np.var(x, 0, ddof=1)\n vy = np.var(y, 0, ddof=1)\n vx, vy = vx[vx*vy>0], vy[vx*vy>0]\n if len(vx)==0:\n return False\n F = vx/vy\n p_value = stat.f.cdf(F, len(x)-1, len(y)-1)\n p_value = 2*np.min([p_value, 1-p_value], axis=0)\n if np.any(p_value < level):\n return False\n else:\n return True",
"def test_posteriors_good_data(self):\r\n first = [0, 0.25, 0.5, 1, 0.25]\r\n second = [0.25, 0.5, 0, 0.1, 1]\r\n product = [0, 0.125, 0, 0.1, 0.25]\r\n for obs, exp in zip(posteriors(first, second), product):\r\n self.assertFloatEqual(obs, exp)",
"def ks_test(x, y=None, alt=\"two sided\", exact=None, warn_for_ties=True):\r\n # translation from R 2.4\r\n num_x = len(x)\r\n num_y = None\r\n x = zip(x, zeros(len(x), int))\r\n lo = [\"less\", \"lo\", \"lower\", \"l\", \"lt\"]\r\n hi = [\"greater\", \"hi\", \"high\", \"h\", \"g\", \"gt\"]\r\n two = [\"two sided\", \"2\", 2, \"two tailed\", \"two\", \"two.sided\"]\r\n Pval = None\r\n # in anticipation of actually implementing the 1-sample cases\r\n if y is not None:\r\n num_y = len(y)\r\n y = zip(y, ones(len(y), int))\r\n n = num_x * num_y / (num_x + num_y)\r\n combined = x + y\r\n if len(set(combined)) < num_x + num_y:\r\n ties = True\r\n else:\r\n ties = False\r\n\r\n combined = array(combined, dtype=[('stat', float), ('sample', int)])\r\n combined.sort(order='stat')\r\n cumsum = zeros(combined.shape[0], float)\r\n scales = array([1 / num_x, -1 / num_y])\r\n indices = combined['sample']\r\n cumsum = scales.take(indices)\r\n cumsum = cumsum.cumsum()\r\n if exact is None:\r\n exact = num_x * num_y < 1e4\r\n\r\n if alt in two:\r\n stat = max(fabs(cumsum))\r\n elif alt in lo:\r\n stat = -cumsum.min()\r\n elif alt in hi:\r\n stat = cumsum.max()\r\n else:\r\n raise RuntimeError(\"Unknown alt: %s\" % alt)\r\n if exact and alt in two and not ties:\r\n Pval = 1 - psmirnov2x(stat, num_x, num_y)\r\n else:\r\n raise NotImplementedError\r\n\r\n if Pval is None:\r\n if alt in two:\r\n Pval = 1 - pkstwo(sqrt(n) * stat)\r\n else:\r\n Pval = exp(-2 * n * stat ** 2)\r\n\r\n if ties and warn_for_ties:\r\n warnings.warn(\"Cannot compute correct KS probability with ties\")\r\n\r\n try: # if numpy arrays were input, the Pval can be an array of len==1\r\n Pval = Pval[0]\r\n except (TypeError, IndexError):\r\n pass\r\n return stat, Pval",
"def KernelTest(x, y):\n\n Result = (np.dot(x_test[x, :], x_train[y, :])+1)**5 # Polynomial\n # Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n # Sum = DotProduct(x, y)\n #Sum = 0.0\n #for i in range(2):\n # Sum = Sum + x_train[x, i]*x_train[y, i]\n # Result = (Sum+1)**5\n \"\"\"\n #Gaussian\n sigma = 1\n if np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_test[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_test[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_test[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result",
"def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps",
"def evaluate(self, X_test, Y_test):\n \n test_data = zip(X_test, Y_test)\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n \n# Updated for the testing\n# ========================\n return (sum(int(x == y) for (x, y) in test_results) / 100)",
"def compute_similarity(x, y, metric='kl_divergence'):\n from scipy.stats import entropy, pearsonr\n # remove zeros slightly increase divergence\n x = x[x != 0]\n y = y[y != 0]\n # outer join two distributions\n eps = min(x.min(), y.min()) / 10\n xy = pd.concat([x, y], axis=1).add(eps, fill_value=0)\n x = xy.iloc[:, 0]\n y = xy.iloc[:, 1]\n if metric == 'pearson':\n score, _ = pearsonr(x, y)\n else:\n score = entropy(x, y)\n return score",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def inner_median(x, y):\n intersection = np.intersect1d(x, y)\n return np.median(intersection)",
"def tanimoto_dissimilarity(X, Y, X_batch_size=50, Y_batch_size=50):\n n_features = X.shape[-1]\n if X.ndim == 1:\n X = X.reshape(-1, n_features)\n if Y.ndim == 1:\n Y = Y.reshape(-1, n_features)\n tan_sim = []\n X_total_batches = X.shape[0] // X_batch_size + 1\n Y_total_batches = Y.shape[0] // Y_batch_size + 1\n for X_batch_i in range(X_total_batches):\n X_start_idx = X_batch_i * X_batch_size\n X_end_idx = min((X_batch_i + 1) * X_batch_size, X.shape[0])\n X_batch = X[X_start_idx:X_end_idx, :]\n for Y_batch_i in range(Y_total_batches):\n Y_start_idx = Y_batch_i * Y_batch_size\n Y_end_idx = min((Y_batch_i + 1) * Y_batch_size, Y.shape[0])\n Y_batch = Y[Y_start_idx:Y_end_idx, :]\n\n # adapted from: https://github.com/deepchem/deepchem/blob/\n # 2531eca8564c1dc68910d791b0bcd91fd586afb9/deepchem/trans/\n # transformers.py#L752\n numerator = np.dot(X_batch, Y_batch.T).flatten()\n # equivalent to np.bitwise_and(X_batch, Y_batch), axis=1)\n denominator = n_features - np.dot(1 - X_batch,\n (1 - Y_batch).T).flatten()\n # np.sum(np.bitwise_or(X_rep, Y_rep), axis=1)\n\n tan_sim.append(numerator / denominator)\n tan_sim = np.hstack(tan_sim)\n return 1.0 - tan_sim",
"def test_cosine_hungarian_tolerance_01():\n spec1 = numpy.array([[100, 200, 300, 500],\n [0.1, 0.1, 1.0, 1.0]], dtype=\"float\").T\n\n spec2 = numpy.array([[105, 205.1, 300, 500.1],\n [0.1, 0.1, 1.0, 1.0]], dtype=\"float\").T\n\n matching_pairs = collect_peak_pairs(spec1, spec2, tolerance=0.2)\n assert len(matching_pairs) == 2, \"Expected different number of matching peaks\"\n assert matching_pairs == [(2, 2, 1.0), (3, 3, 1.0)], \"Expected different matchin pairs.\"",
"def test_likelihoods_equal_priors(self):\r\n equal = [0.25, 0.25, 0.25, 0.25]\r\n unequal = [0.5, 0.25, 0.125, 0.125]\r\n equal_answer = [1, 1, 1, 1]\r\n unequal_answer = [2, 1, 0.5, 0.5]\r\n for obs, exp in zip(likelihoods(equal, equal), equal_answer):\r\n self.assertFloatEqual(obs, exp)\r\n\r\n for obs, exp in zip(likelihoods(unequal, equal), unequal_answer):\r\n self.assertFloatEqual(obs, exp)",
"def check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64",
"def linfit(x, y, sigma_y):\n npar = 2 #Number of fitting parameters. If we were doing ax^2 + bx + c then npar = 3\n npts = len(x) #Number of data points\n\n if npts < npar:\n print(\"Not enough data points to fit a first order polynomial\")\n return\n\n if npts != len(y) or npts != len(sigma_y):\n print(\"Lengths of the inputs must be the same. len(x) = {}, len(y) = {},\"\n \"len(sigma_y) = {}\".format(len(x),len(y), len(sigma_y)))\n return\n\n #Make sure everything is a np.array that'll make the math easier\n x = np.array(x)\n y = np.array(y)\n sigma_y = np.array(sigma_y)\n\n sw = sum(1 / sigma_y**2)\n sy = sum(y / sigma_y**2)\n sx = sum(x / sigma_y**2)\n sx2= sum((x / sigma_y)**2)\n sxy= sum((y*x / sigma_y)**2)\n\n #Find the fit parameters\n Delta = sw*sx2 - sx**2\n a = (sx2*sy - sx*sxy)/Delta\n b = (sxy*sw - sx*sy)/Delta\n da = np.sqrt(sx2 / Delta)\n db = np.sqrt(sw / Delta)\n\n fit = a + b*x\n chi2 = sum(((y - fit) / sigma_y)**2)\n if npts > npar:\n chi2red = chi2 / (npts - npar)\n else:\n chi2red = 0\n\n return [a, da, b, db, chi2red]",
"def checkMedian(nums1, nums2, x=None, x_dash=None,y=None,y_dash=None,median=False):\n # odd array\n if (len(nums1) + len(nums2)) % 2 == 1:\n if x != None and x_dash != None and y != None and y_dash != None:\n if nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n median = nums1[x]\n return median\n elif nums1[x] <= nums2[y] and nums1[x_dash] >= nums2[y]:\n median = nums2[y]\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash == None:\n if nums1[x_dash] >= nums2[y]:\n median = nums2[y]\n return median\n else:\n return False\n elif x != None and x_dash == None and y != None and y_dash != None:\n if nums2[y] >= nums1[x]:\n median = nums2[y]\n return median\n elif nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n\n median = nums1[x]\n return median\n else:\n return False\n # even array\n else:\n if x != None and x_dash != None and y != None and y_dash != None:\n if nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n if nums1[x_dash] <= nums2[y_dash]:\n median = (nums1[x] + nums1[x_dash])/2\n elif nums1[x_dash] > nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n elif nums1[x] <= nums2[y] and nums1[x_dash] >= nums2[y]:\n if nums2[y_dash] <= nums1[x_dash]:\n median = (nums2[y] + nums2[y_dash])/2\n elif nums2[y_dash] > nums1[x_dash]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash == None:\n if nums1[x_dash] >= nums2[y]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n else:\n return False\n elif x != None and x_dash == None and y != None and y_dash != None:\n if nums2[y] >= nums1[x]:\n median = (nums2[y] + nums2[y_dash])/2\n return median\n elif nums1[x] >= nums2[y] and nums1[x] <= nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n else:\n return False\n elif x != None and x_dash == None and y == None and y_dash != None:\n if nums1[x] <= nums2[y_dash]:\n median = (nums1[x] + nums2[y_dash])/2\n return median\n else:\n return False\n elif x == None and x_dash != None and y != None and y_dash != None:\n if nums2[y] <= nums1[x_dash]:\n if nums1[x_dash]<= nums2[y_dash]:\n median = (nums2[y] + nums1[x_dash])/2\n return median\n elif nums1[x_dash] > nums2[y_dash]:\n median = (nums2[y] + nums2[y_dash])/2\n return median\n else:\n return False",
"def _correlation_test_helper(\n X: Union[np.ndarray, spmatrix],\n Y: np.ndarray,\n n_perms: Optional[int] = None,\n seed: Optional[int] = None,\n confidence_level: float = 0.95,\n **kwargs,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n\n def perm_test_extractor(\n res: Sequence[Tuple[np.ndarray, np.ndarray]]\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n pvals, corr_bs = zip(*res)\n pvals = np.sum(pvals, axis=0) / float(n_perms)\n\n corr_bs = np.concatenate(corr_bs, axis=0)\n corr_ci_low, corr_ci_high = np.quantile(corr_bs, q=ql, axis=0), np.quantile(\n corr_bs, q=qh, axis=0\n )\n\n return pvals, corr_ci_low, corr_ci_high\n\n if not (0 <= confidence_level <= 1):\n raise ValueError(\n f\"Expected `confidence_level` to be in interval `[0, 1]`, found `{confidence_level}`.\"\n )\n\n n = X.shape[1] # genes x cells\n ql = 1 - confidence_level - (1 - confidence_level) / 2.0\n qh = confidence_level + (1 - confidence_level) / 2.0\n\n if issparse(X) and not isspmatrix_csr(X):\n X = csr_matrix(X)\n\n corr = _mat_mat_corr_sparse(X, Y) if issparse(X) else _mat_mat_corr_dense(X, Y)\n\n # see: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient#Using_the_Fisher_transformation\n mean, se = np.arctanh(corr), 1.0 / np.sqrt(n - 3)\n z_score = (np.arctanh(corr) - np.arctanh(0)) * np.sqrt(n - 3)\n\n z = norm.ppf(qh)\n corr_ci_low = np.tanh(mean - z * se)\n corr_ci_high = np.tanh(mean + z * se)\n pvals = 2 * norm.cdf(-np.abs(z_score))\n\n return corr, pvals, corr_ci_low, corr_ci_high",
"def fidelity(A: numpy.ndarray, B: numpy.ndarray) -> float:\n Asqrtm = scipy.linalg.sqrtm(A)\n return (numpy.trace(scipy.linalg.sqrtm(Asqrtm@B@Asqrtm)).real)**2",
"def test_sim(vec_x, vec_y, feature_list, func):\n feature_map_x = create_feature_map(vec_x, feature_list)\n feature_map_y = create_feature_map(vec_y, feature_list)\n\n if func == 0:\n return cosine_similarity(feature_map_x, feature_map_y)\n\n return minmax(feature_map_x, feature_map_y)",
"def test_f_two_sample(self):\r\n\r\n # The expected values in this test are obtained through R.\r\n # In R the F test is var.test(x,y) different alternative hypotheses\r\n # can be specified (two sided, less, or greater).\r\n # The vectors are random samples from a particular normal distribution\r\n #(mean and sd specified).\r\n\r\n # a: 50 elem, mean=0 sd=1\r\n a = [-0.70701689, -1.24788845, -1.65516470, 0.10443876, -0.48526915,\r\n -0.71820656, -1.02603596, 0.03975982, -2.23404324, -0.21509363,\r\n 0.08438468, -0.01970062, -0.67907971, -0.89853667, 1.11137131,\r\n 0.05960496, -1.51172084, -0.79733957, -1.60040659, 0.80530639,\r\n -0.81715836, -0.69233474, 0.95750665, 0.99576429, -1.61340216,\r\n -0.43572590, -1.50862327, 0.92847551, -0.68382338, -1.12523522,\r\n -0.09147488, 0.66756023, -0.87277588, -1.36539039, -0.11748707,\r\n -1.63632578, -0.31343078, -0.28176086, 0.33854483, -0.51785630,\r\n 2.25360559, -0.80761191, 1.18983499, 0.57080342, -1.44601700,\r\n -0.53906955, -0.01975266, -1.37147915, -0.31537616, 0.26877544]\r\n\r\n # b: 50 elem, mean=0, sd=1.2\r\n b = [\r\n 0.081418743, 0.276571612, -\r\n 1.864316504, 0.675213612, -0.769202643,\r\n 0.140372825, -1.426250184, 0.058617884, -\r\n 0.819287409, -0.007701916,\r\n -0.782722020, -\r\n 0.285891593, 0.661980419, 0.383225191, 0.622444946,\r\n -0.192446150, 0.297150571, 0.408896059, -\r\n 0.167359383, -0.552381362,\r\n 0.982168338, 1.439730446, 1.967616101, -\r\n 0.579607307, 1.095590943,\r\n 0.240591302, -1.566937143, -\r\n 0.199091349, -1.232983905, 0.362378169,\r\n 1.166061081, -0.604676222, -\r\n 0.536560206, -0.303117595, 1.519222792,\r\n -0.319146503, 2.206220810, -\r\n 0.566351124, -0.720397392, -0.452001377,\r\n 0.250890097, 0.320685395, -\r\n 1.014632725, -3.010346273, -1.703955054,\r\n 0.592587381, -1.237451255, 0.172243366, -0.452641122, -0.982148581]\r\n\r\n # c: 60 elem, mean=5, sd=1\r\n c = [4.654329, 5.242129, 6.272640, 5.781779, 4.391241, 3.800752,\r\n 4.559463, 4.318922, 3.243020, 5.121280, 4.126385, 5.541131,\r\n 4.777480, 5.646913, 6.972584, 3.817172, 6.128700, 4.731467,\r\n 6.762068, 5.082983, 5.298511, 5.491125, 4.532369, 4.265552,\r\n 5.697317, 5.509730, 2.935704, 4.507456, 3.786794, 5.548383,\r\n 3.674487, 5.536556, 5.297847, 2.439642, 4.759836, 5.114649,\r\n 5.986774, 4.517485, 4.579208, 4.579374, 2.502890, 5.190955,\r\n 5.983194, 6.766645, 4.905079, 4.214273, 3.950364, 6.262393,\r\n 8.122084, 6.330007, 4.767943, 5.194029, 3.503136, 6.039079,\r\n 4.485647, 6.116235, 6.302268, 3.596693, 5.743316, 6.860152]\r\n\r\n # d: 30 elem, mean=0, sd =0.05\r\n d = [\r\n 0.104517366, 0.023039678, 0.005579091, 0.052928250, 0.020724823,\r\n -0.060823243, -0.019000890, -\r\n 0.064133996, -0.016321594, -0.008898334,\r\n -0.027626992, -0.051946186, 0.085269587, -\r\n 0.031190678, 0.065172938,\r\n -0.054628573, 0.019257306, -\r\n 0.032427056, -0.058767356, 0.030927400,\r\n 0.052247357, -\r\n 0.042954937, 0.031842104, 0.094130522, -0.024828465,\r\n 0.011320453, -0.016195062, 0.015631245, -0.050335598, -0.031658335]\r\n\r\n a, b, c, d = map(array, [a, b, c, d])\r\n self.assertEqual(map(len, [a, b, c, d]), [50, 50, 60, 30])\r\n\r\n # allowed error. 
This big, because results from R\r\n # are rounded at 4 decimals\r\n error = 1e-4\r\n\r\n self.assertFloatEqual(f_two_sample(a, a), (49, 49, 1, 1), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b), (49, 49, 0.8575, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(b, a), (49, 49, 1.1662, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='low'),\r\n (49, 49, 0.8575, 0.2963), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='high'),\r\n (49, 49, 0.8575, 0.7037), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, c),\r\n (49, 59, 0.6587, 0.1345), eps=error)\r\n # p value very small, so first check df's and F value\r\n self.assertFloatEqualAbs(f_two_sample(d, a, tails='low')[0:3],\r\n (29, 49, 0.0028), eps=error)\r\n assert f_two_sample(d, a, tails='low')[3] < 2.2e-16 # p value\r",
"def L2_dists_vectorized(x, y):\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2, axis=1)[:, np.newaxis]\n dists += np.sum(y**2, axis=1)\n return np.sqrt(dists)",
"def similarity_function(x, y):\n\n def safe_get(field, row, default_value):\n # Safely get a value from the Row. If the value is None, get the\n # default value.\n return row[field] if row[field] is not None else default_value\n\n # Extract the values for the categorical and continuous features for both\n # the x and y samples. Use an empty string as the default value for missing\n # categorical fields and 0 for the continuous ones.\n x_categorical_features = [safe_get(k, x, \"\") for k in CATEGORICAL_FEATURES]\n x_continuous_features = [safe_get(k, x, 0) for k in CONTINUOUS_FEATURES]\n y_categorical_features = [safe_get(k, y, \"\") for k in CATEGORICAL_FEATURES]\n y_continuous_features = [safe_get(k, y, 0) for k in CONTINUOUS_FEATURES]\n\n # Here a larger distance indicates a poorer match between categorical variables.\n j_d = distance.hamming(x_categorical_features, y_categorical_features)\n j_c = distance.canberra(x_continuous_features, y_continuous_features)\n\n # Take the product of similarities to attain a univariate similarity score.\n # Add a minimal constant to prevent zero values from categorical features.\n # Note: since both the distance function return a Numpy type, we need to\n # call the |item| function to get the underlying Python type. If we don't\n # do that this job will fail when performing KDE due to SPARK-20803 on\n # Spark 2.2.0.\n return abs((j_c + 0.001) * j_d).item()",
"def testSymmetric(self):\n with self.test_context() as session:\n nClasses = 5\n nPoints = 10\n tolerance = 1e-4\n epsilon = 1e-3\n F = tf.placeholder(settings.float_type)\n F_data = np.ones((nPoints, nClasses))\n feed = {F: F_data}\n rng = np.random.RandomState(1)\n Y = rng.randint(nClasses, size=(nPoints, 1))\n\n l = gpflow.likelihoods.MultiClass(nClasses)\n l.invlink.epsilon = epsilon\n l.compile()\n\n mu, _ = session.run(l.predict_mean_and_var(F, F), feed_dict=feed)\n pred = session.run(l.predict_density(F, F, Y), feed_dict=feed)\n variational_expectations = session.run(\n l.variational_expectations(F, F, Y), feed_dict=feed)\n expected_mu = (1. / nClasses * (1. - epsilon) + (1. - 1. / nClasses) * \\\n epsilon / (nClasses - 1)) * np.ones((nPoints, 1))\n\n self.assertTrue(np.allclose(mu, expected_mu, tolerance,\n tolerance)) # assert_allclose() would complain about shape mismatch\n expected_log_denisty = np.log(expected_mu)\n self.assertTrue(np.allclose(pred, expected_log_denisty, 1e-3, 1e-3))\n validation_variational_expectation = 1. / nClasses * np.log(1. - epsilon) + \\\n (1. - 1. / nClasses) * np.log(epsilon / (nClasses - 1))\n assert_allclose(\n variational_expectations,\n np.ones((nPoints, 1)) * validation_variational_expectation,\n tolerance, tolerance)",
"def feq(x, y, precision=0.0000005):\n x = np.asanyarray(x)\n y = np.asanyarray(y)\n boolean = abs(x-y) <= (abs(x+y)*precision)\n return boolean"
]
| [
"0.5877307",
"0.5653117",
"0.56165034",
"0.56134",
"0.5569941",
"0.54437655",
"0.5397881",
"0.53854823",
"0.53649646",
"0.53622454",
"0.53537786",
"0.5341069",
"0.5332188",
"0.5312273",
"0.52843684",
"0.5277466",
"0.5256211",
"0.5219326",
"0.5143481",
"0.51307106",
"0.5129633",
"0.5086796",
"0.50861615",
"0.50828016",
"0.5067545",
"0.5060435",
"0.5057164",
"0.505528",
"0.5053563",
"0.50462973"
]
| 0.7405578 | 0 |
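A quick usage sketch for the fligner_policello_test record above: the two sample arrays and the 0.05 threshold are illustrative assumptions, and the function itself is assumed to be in scope exactly as defined in that record (it relies on numpy and scipy.stats.norm internally).

import numpy as np
from scipy.stats import norm  # used inside fligner_policello_test

# Hypothetical samples; any two 1-D arrays of observations work.
X = np.array([1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30])
Y = np.array([0.88, 0.65, 0.60, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29])

z, p_value = fligner_policello_test(X, Y)
# Reject equal medians at the (assumed) 5% level when p_value < 0.05.
print(f"z = {z:.3f}, p = {p_value:.3f}")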
Given the path from the current working directory (cwd) to a root directory, and a path from that root directory to some file, derives the path from the cwd to that file. | def from_cwd(root, path):
return normpath(join(root, normpath(path))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cwd_in_path():\n ...",
"def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)",
"def relative_path(root_dir, dirpath, f):\n full = os.path.join(dirpath, f)\n if not root_dir:\n return full\n if not full.startswith(root_dir):\n print(\"ERROR - bad path for root\", full)\n return None\n full = full[len(root_dir):]\n if full.startswith(\"/\"):\n return full[1:]\n return full",
"def get_relative_path(self, file_path):\n file_path = os.path.abspath(file_path)\n if self.base_dir is not None:\n file_path = file_path.replace(os.path.abspath(self.base_dir), \"\")\n assert file_path[0] == \"/\"\n file_path = file_path[1:]\n return file_path",
"def get_path(path):\n if _prefix and not '/' in path:\n path = _prefix + path\n\n if not _cwd:\n return path\n\n return join(_cwd, path)",
"def getPath(filename):\n\n if os.path.isabs(filename):\n pathfile = filename\n else:\n filename = filename.lstrip('/\\.')\n filename = filename.replace('/', '\\\\')\n pathfile = os.path.join(os.getcwd(), filename)\n \n return pathfile",
"def cwd (self, path):\r\n pass",
"def get_path(self, path):\n if path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path",
"def abspath(fpath):\n from os import path, getcwd, chdir\n original = getcwd()\n chdir(reporoot)\n result = path.abspath(path.expanduser(fpath))\n chdir(original)\n return result",
"def get_path(self, project_file=None):\n root = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..')\n )\n if project_file:\n return os.path.join(root, project_file)\n else:\n return root",
"def find_directory_with_a_file(\n filename: str,\n cwd: Optional[Union[str, Path]] = None) -> Optional[Path]:\n if cwd is None:\n curr_dir = Path(os.getcwd()).absolute()\n else:\n curr_dir = Path(cwd).absolute()\n\n pathname = curr_dir / filename\n if pathname.exists():\n return curr_dir\n\n for work_dir in curr_dir.parents:\n pathname = work_dir / filename\n if pathname.exists():\n return work_dir\n\n return None",
"def resolve(fname):\n if os.path.dirname(__file__):\n return os.path.dirname(__file__) + \"/../common/\" + fname\n else:\n return \"/common/\" + fname",
"def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path",
"def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'",
"def getProjectRootFile(fname):\n return os.path.join(Configurations.getProjectRootDir(), fname)",
"def root_dir():\r\n return Path(__file__).parent.parent",
"def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))",
"def get_path(relative_path=None):\n\n root_path = os.path.dirname(os.path.dirname(__file__))\n\n if relative_path is None:\n return root_path\n else:\n return os.path.abspath(os.path.join(root_path, relative_path))",
"def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))",
"def get_path_relative_to_http_root(file_path):\n return os.path.relpath(file_path, get_http_path_prefix())",
"def _abs_path(fn):\n return os.path.join(os.path.dirname(__file__), fn)",
"def absPath(path):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)",
"def get_abs_path(file_path, relative_path):\n import os\n dir_path = os.path.dirname(file_path)\n abs_path = os.path.join(dir_path, relative_path)\n return abs_path",
"def cwd_for_path(self, path):\n os_path = to_os_path(path, self.root_dir)\n # in the case of notebooks and kernels not being on the same filesystem,\n # walk up to root_dir if the paths don't exist\n while not os.path.isdir(os_path) and os_path != self.root_dir:\n os_path = os.path.dirname(os_path)\n return os_path",
"def project_path(cur_path=''):\n if not cur_path:\n cur_path = __file__\n real_path = os.path.realpath(cur_path)\n # path of upper-level directory\n upper_folder = os.path.split(real_path)[0]\n # path of topmost-level directory (trunk)\n return os.path.split(upper_folder)[0]",
"def pathtofolder():\n return os.getcwd()",
"def _adjust_path(self, file):\n path_component = '/osm_pla/test/'\n real_path = os.path.realpath(file)\n if path_component not in real_path:\n return os.path.dirname(real_path) + path_component + os.path.basename(real_path)\n else:\n return real_path",
"def to_rooted_path(self, filepath: Union[Path, PathLike, str]) -> PurePath:\n fp = Path(filepath)\n if not fp.is_absolute():\n fp = Path(self._root, filepath)\n \n return PurePath(fp.absolute())",
"def relative_path(__file__, path):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), path))",
"def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')"
]
| [
"0.7066784",
"0.6979399",
"0.68085057",
"0.6724433",
"0.6708733",
"0.66995543",
"0.663998",
"0.66332114",
"0.6588345",
"0.6575982",
"0.652172",
"0.643652",
"0.6431836",
"0.6407975",
"0.6388555",
"0.63809216",
"0.6373353",
"0.63663834",
"0.6355308",
"0.6333472",
"0.632867",
"0.62946516",
"0.62649006",
"0.62593365",
"0.6242774",
"0.62405103",
"0.6233804",
"0.623034",
"0.62281924",
"0.62163234"
]
| 0.77983445 | 0 |
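To make the behaviour of the from_cwd record above concrete, here is a small standalone sketch; the directory names are invented for the example, and the one-line function body is repeated verbatim from the record so the snippet runs on its own.

from os.path import join, normpath

def from_cwd(root, path):
    # Same composition as the record above: join the two pieces, then normalise.
    return normpath(join(root, normpath(path)))

# Hypothetical layout: the project root sits two levels above the cwd.
print(from_cwd("../../project", "src/utils/io.py"))    # ../../project/src/utils/io.py
print(from_cwd("/srv/repo", "./docs/../README.md"))    # /srv/repo/README.md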
Parses the .ciignore file to get the set of ignored directories. | def get_ignored_dirs(ci_ignore_path):
with open(ci_ignore_path, 'r') as ignore_file:
return set([
normpath(line.strip())
for line in ignore_file.readlines()
if not line.startswith('#') and not is_blank(line)
]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_gitignore():\n excludes = []\n gitignore = Path(\".gitignore\")\n if gitignore.exists():\n with gitignore.open() as f:\n excludes += f.read().split(\"\\n\")\n else:\n raise ValueError(\n \"No exclude configuration option and no .gitignore file present\"\n )\n return excludes",
"def _get_ignore_list(self) -> List[str]:\n if not self.exists():\n return []\n if self._file_exists():\n with open(self._path, \"r\", encoding=DefaultOpenEncoding.READ) as fh:\n return [line.rstrip() for line in fh if line]\n return []",
"def ignore_from_repo(self, directory, ignore):\n for filename in os.listdir(directory):\n if not filename.endswith('.rpm'):\n continue\n _, basename = filename.split('-', 1)\n ignore.add(basename[:-4])",
"def gitignore(self):\n patterns = []\n for path in ('.gitignore', '.git/info/exclude'):\n try:\n with open(pjoin(self.options.target_repo.location, path)) as f:\n patterns.extend(f)\n except FileNotFoundError:\n pass\n except IOError as e:\n logger.warning(f'failed reading {path!r}: {e}')\n return PathSpec.from_lines('gitwildmatch', patterns)",
"def _parse_gitignore(self):\n gitignore_path = os.path.join(self.config.path, '.gitignore')\n lines = [] # contains each line of the .gitignore file\n results = [] # contains the result regexp patterns\n neg_results = [] # contains the result negative regexp patterns\n\n with open(gitignore_path, 'r') as f:\n lines = f.readlines()\n\n # Sort the line in order to have inverse pattern first\n lines.sort(self._gitline_comparator)\n\n # For each git pattern, convert it to regexp pattern\n for line in lines:\n regexp = self._gitline_to_regexp(line)\n if regexp is not None:\n if not line.startswith('!'):\n results.append(regexp)\n else:\n neg_results.append(regexp)\n\n return neg_results, results",
"def parse_ignore_file(file):\n from os import path\n ignore_keys = []\n if path.isfile(file):\n with open(file) as f:\n for line in f:\n if line.strip != \"\":\n ignore_keys.append(line.strip())\n return ignore_keys",
"def _parse_gitignore(self):\n gitignore_path = os.path.join(self.project_path, '.gitignore')\n lines = [] # contains each line of the .gitignore file\n results = [] # contains the result regexp patterns\n neg_results = [] # contains the result negative regexp patterns\n\n try:\n with open(gitignore_path, 'r') as f:\n lines = f.readlines()\n except IOError as err:\n raise BaboonException(format(err))\n\n # Sort the line in order to have inverse pattern first\n lines = sorted(lines, key=cmp_to_key(self._gitline_comparator))\n\n # For each git pattern, convert it to regexp pattern\n for line in lines:\n regexp = self._gitline_to_regexp(line)\n if regexp is not None:\n if not line.startswith('!'):\n results.append(regexp)\n else:\n neg_results.append(regexp)\n\n return neg_results, results",
"def _populate_gitignore_items(self):\n\n # Reset the include_regexps and exclude_regexps.\n self.include_regexps = []\n self.exclude_regexps = [re.compile('.*\\.git/.*\\.lock'),\n re.compile('.*\\.baboon-timestamp'),\n re.compile('.*baboon.*')]\n\n # If there's a .gitignore file in the watched directory.\n if os.path.exists(self.gitignore_path):\n # Parse the gitignore.\n ignores = self._parse_gitignore()\n if ignores is not None:\n # Populate the regexps list with the ignores result.\n self.include_regexps += [re.compile(x) for x in ignores[0]]\n self.exclude_regexps += [re.compile(x) for x in ignores[1]]",
"def parse_gitignore(gipath):\n\n gitignore_file = open(os.path.abspath(gipath), 'r')\n gilist = []\n for row in gitignore_file.readlines():\n if not row.startswith('#') and row != '\\n':\n if row.endswith('/\\n'):\n gilist.append(row[:-2])\n else:\n gilist.append(row[:-1])\n gitignore_file.close()\n return gilist",
"def get_tracignore_patterns(env_parent_dir):\n path = os.path.join(env_parent_dir, '.tracignore')\n try:\n lines = [line.strip() for line in read_file(path).splitlines()]\n except IOError:\n return ['.*']\n return [line for line in lines if line and not line.startswith('#')]",
"def get_unignored_file_paths(ignore_list=None, whitelist=None):\n unignored_files = []\n if ignore_list is None:\n ignore_list = []\n if whitelist is None:\n whitelist = []\n\n for root, dirs, files in os.walk(\".\"):\n floyd_logger.debug(\"Root:%s, Dirs:%s\", root, dirs)\n\n if ignore_path(unix_style_path(root), ignore_list, whitelist):\n # Reset dirs to avoid going further down this directory.\n # Then continue to the next iteration of os.walk, which causes\n # everything in this directory to be ignored.\n #\n # Note that whitelisted files that are within directories that are\n # ignored will not be whitelisted. This follows the expected\n # behavior established by .gitignore logic:\n # \"It is not possible to re-include a file if a parent directory of\n # that file is excluded.\"\n # https://git-scm.com/docs/gitignore#_pattern_format\n dirs[:] = []\n floyd_logger.debug(\"Ignoring directory : %s\", root)\n continue\n\n for file_name in files:\n file_path = unix_style_path(os.path.join(root, file_name))\n if ignore_path(file_path, ignore_list, whitelist):\n floyd_logger.debug(\"Ignoring file : %s\", file_name)\n continue\n\n unignored_files.append(os.path.join(root, file_name))\n\n return unignored_files",
"def ignore(ui, repo, *user_files, **opts):\n # The doc string below will show up in hg help.\n\n use_glob = opts.get('globs')\n # Use HG API call to get all files in state \"unknown\". Those are relative to the repo root\n unknown_files = repo.status(unknown=True)[4]\n # Bring user-given file names into the \"relative-to-HG-root\" format\n files_absolute = [ os.path.realpath(p) for p in user_files ]\n files = [ os.path.relpath(p, repo.root) for p in files_absolute ]\n\n # Check if ignore file exists, otherwise create it\n ignore_filename = os.path.normpath(os.path.join(repo.root, \".hgignore\"))\n try:\n with open(ignore_filename, \"a\") as f: pass\n except:\n with open(ignore_filename, \"w\") as f: f.write()\n\n # Check if the files given on the command line are really unknown to HG\n already_ignored = set()\n if use_glob:\n for f in files:\n if is_already_ignored(f, ignore_filename):\n ui.write(\"Pattern %s is already active.\\n\" % (f,))\n already_ignored.add(f)\n else:\n for f in files:\n if os.path.isdir(f):\n # Directories are not tracked, but we want to add them to .hgignore, anyway. \n continue\n if f not in unknown_files:\n if is_already_ignored(f, ignore_filename):\n ui.write(\"File %s is already ignored.\\n\" % (f,))\n already_ignored.add(f)\n else:\n raise util.Abort(\"File %s is not in state UNKNOWN.\" % (f,))\n\n # Prepare list of files to be ignored in .hgignore line-by-line syntax\n ignores = \"\"\n for f in (set(files) - already_ignored):\n ignores += f\n ignores += '\\n'\n if len(ignores):\n ui.write(\"Ignored:\\n%s\" % (ignores,))\n\n # Search for the glob section in .hgignore; when found, insert filenames after\n found_glob_section = False\n for line in fileinput.input(ignore_filename, inplace=True, mode=\"rU\"):\n if re.match(\"^syntax:.*glob\", line):\n sys.stdout.write(line)\n sys.stdout.write(ignores)\n found_glob_section = True\n else:\n sys.stdout.write(line)\n\n # No \"syntax: glob\" section in .hgignore file found -> create one\n if not found_glob_section:\n with open(ignore_filename, \"a\") as ignore_file:\n ignore_file.write(\"syntax: glob\\n\")\n ignore_file.write(ignores)",
"def _parse_hgignore(self):\n hgignore_path = os.path.join(self.config.path, '.hgignore')\n lines = [] # contains each line of the .hgignore file\n results = [] # contains the result regexp patterns\n\n # Mercurial supports several pattern syntaxes. The default\n # syntax used is Python/Perl-style regular expressions.\n syntax = 'regexp'\n\n with open(hgignore_path, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n # Mercurial supports several pattern syntaxes. The default\n # syntax used is Python/Perl-style regular expressions.\n # To change the syntax used, use a line of the following\n # form:\n #\n # syntax: NAME\n #\n # where NAME is one of the following:\n # regexp\n # Regular expression, Python/Perl syntax.\n # glob\n # Shell-style glob.\n new_syntax = self._get_hgignore_syntax(line)\n if new_syntax is not None:\n syntax = new_syntax\n else:\n if syntax == 'regexp':\n results += line\n elif syntax == 'glob':\n results += fnmatch.translate(line)",
"def gitignored(self, path):\n if path.startswith(self.options.target_repo.location):\n repo_prefix_len = len(self.options.target_repo.location) + 1\n path = path[repo_prefix_len:]\n return self.gitignore.match_file(path)",
"def ignore(self, directory, files):\n ignore_list = []\n ignores = ('build', 'var')\n build = os.path.join(directory, 'build')\n var = os.path.join(directory, 'var')\n for filename in files:\n full_path = os.path.join(directory, filename)\n if full_path.startswith(build):\n ignore_list.append(filename)\n if full_path.startswith(var):\n ignore_list.append(filename)\n\n return ignore_list",
"def _remove_gitignore_files(self, log_prompt: str) -> None:\n try:\n repo = git.Repo(self._content_repo)\n files_to_ignore = repo.ignored(self._facts[\"lint_files\"])\n for file in files_to_ignore:\n logger.info(f\"{log_prompt} - Skipping gitignore file {file}\")\n self._facts[\"lint_files\"] = [\n path\n for path in self._facts[\"lint_files\"]\n if path not in files_to_ignore\n ]\n\n except (git.InvalidGitRepositoryError, git.NoSuchPathError):\n logger.debug(\"No gitignore files is available\")",
"def test_ignores(self, tmpdir):\n from pytest_flake8 import Ignorer\n ignores = [\"E203\", \"b/?.py E204 W205\", \"z.py ALL\", \"*.py E300\"]\n ign = Ignorer(ignores)\n assert ign(tmpdir.join(\"a/b/x.py\")) == \"E203 E204 W205 E300\".split()\n assert ign(tmpdir.join(\"a/y.py\")) == \"E203 E300\".split()\n assert ign(tmpdir.join(\"a/z.py\")) is None",
"def _get_target_files(self) -> List[Path]:\n repo = get_git_repo()\n submodules = repo.submodules # type: ignore\n submodule_paths = [\n self._fname_to_path(repo, submodule.path) for submodule in submodules\n ]\n\n # resolve given paths relative to current working directory\n paths = [p.resolve() for p in self._paths]\n if self._base_commit is not None:\n paths = [\n a\n for a in (self._status.added + self._status.modified)\n # diff_path is a subpath of some element of input_paths\n if any((a == path or path in a.parents) for path in paths)\n ]\n changed_count = len(paths)\n click.echo(f\"| looking at {unit_len(paths, 'changed path')}\", err=True)\n paths = [\n path\n for path in paths\n if all(\n submodule_path not in path.parents\n for submodule_path in submodule_paths\n )\n ]\n if len(paths) != changed_count:\n click.echo(\n f\"| skipping files in {unit_len(submodule_paths, 'submodule')}: \"\n + \", \".join(str(path) for path in submodule_paths),\n err=True,\n )\n\n # Filter out ignore rules, expand directories\n self._ignore_rules_file.seek(0)\n patterns = Parser(self._base_path).parse(self._ignore_rules_file)\n\n file_ignore = FileIgnore(\n base_path=self._base_path, patterns=patterns, target_paths=paths\n )\n\n walked_entries = list(file_ignore.entries())\n click.echo(\n f\"| found {unit_len(walked_entries, 'file')} in the paths to be scanned\",\n err=True,\n )\n filtered: List[Path] = []\n for elem in walked_entries:\n if elem.survives:\n filtered.append(elem.path)\n\n skipped_count = len(walked_entries) - len(filtered)\n if skipped_count:\n click.echo(\n f\"| skipping {unit_len(range(skipped_count), 'file')} based on path ignore rules\",\n err=True,\n )\n\n relative_paths = [path.relative_to(self._base_path) for path in filtered]\n\n return relative_paths",
"def get_git_ignored_files(directory: Path) -> Set[Path]:\n try:\n git_path = shutil.which(\"git\")\n if git_path is None:\n return set()\n output = subprocess.check_output(\n [\n git_path,\n \"status\",\n \"--ignored\",\n \"--untracked-files=all\",\n \"--porcelain=2\",\n str(directory),\n ],\n encoding=\"UTF-8\",\n )\n except subprocess.CalledProcessError:\n # Assume we are not in a directory tracked by git (should be rare in practice).\n return set()\n else:\n result = set()\n for line in output.splitlines():\n if line.startswith(\"!\"):\n # Use os.path.abspath() because it also normalizes the path,\n # something which Path() doesn't do for us.\n p = Path(os.path.abspath(line[1:].strip()))\n result.add(p)\n return result",
"def ignored(self, *paths: PathLike) -> List[str]:\n try:\n proc: str = self.git.check_ignore(*paths)\n except GitCommandError as err:\n # If return code is 1, this means none of the items in *paths\n # are ignored by Git, so return an empty list. Raise the\n # exception on all other return codes.\n if err.status == 1:\n return []\n else:\n raise\n\n return proc.replace(\"\\\\\\\\\", \"\\\\\").replace('\"', \"\").split(\"\\n\")",
"def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def get_ignore_file(directory_path: Union[Path, str]) -> IgnoreFile:\n aml_ignore = AmlIgnoreFile(directory_path)\n git_ignore = GitIgnoreFile(directory_path)\n\n if aml_ignore.exists():\n return aml_ignore\n if git_ignore.exists():\n return git_ignore\n return IgnoreFile()",
"def get_untracked_files():\n untracked_files = set()\n for _, dirs, files in os.walk(os.getcwd()):\n for d in dirs:\n if d not in staging_obj_names:\n file_path = get_path_outside_wit(filename=d.strip())\n if file_path:\n untracked_files.add(file_path)\n for f in files:\n if f not in staging_obj_names:\n file_path = get_path_outside_wit(filename=f.strip())\n if file_path:\n untracked_files.add(file_path)\n return untracked_files",
"def get_gitignore(path):\n\n if '.gitignore' in os.listdir(path):\n return parse_gitignore(os.path.join(path, '.gitignore'))\n else:\n full_path = os.path.abspath(path)\n if full_path == '/':\n return\n return get_gitignore(os.path.dirname(full_path))",
"def is_ignored(file, ignored):\n return any(i in PurePath(path.abspath(file)).parts for i in ignored)",
"def default_ignore(location):\n ignore = '\\n'.join(DEFAULT_IGNORE)\n with utils.cd(location):\n with open('.gitignore', 'w+') as f:\n f.write(ignore)",
"def retrieveIgnoredWords(self):\n words = self.con.getIgnoredWords()\n guilds = self.con.getGuildsInfo()\n\n for item in guilds:\n self.ignored[item.split(',')[0]] = []\n\n for word in words:\n data = word.split(',')\n self.ignored[data[0]].append(data[1])",
"def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames",
"def files_in_folder(self):\n non_til = set()\n filesInFolder = []\n for f in self.find_all_files():\n newstr = f.replace(\"~\", \"\") \n if newstr in self.find_all_files():\n non_til.add(newstr)\n for fs in non_til:\n filesInFolder.append(fs)\n return filesInFolder"
]
| [
"0.698658",
"0.66188174",
"0.6460673",
"0.64552116",
"0.6418179",
"0.6402279",
"0.6382113",
"0.6331511",
"0.628935",
"0.62670165",
"0.6191348",
"0.6068765",
"0.6014593",
"0.59507906",
"0.5916836",
"0.5842815",
"0.58326954",
"0.579473",
"0.57505643",
"0.5738737",
"0.56943214",
"0.56390464",
"0.5621533",
"0.55112034",
"0.5496807",
"0.5496588",
"0.54213375",
"0.54200864",
"0.5378255",
"0.53642756"
]
| 0.79720974 | 0 |
Parses the timespan format used in manifest.json. | def parse_timespan(unparsed):
pattern = '%H:%M:%S'
return datetime.strptime(unparsed, pattern) - datetime.strptime('00:00:00', pattern) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_task_time(line):\n stripret = \"\".join(line.split())\n p = re.compile(r'\\d+\\.\\d{2}-\\d+\\.\\d{2}')\n findret = p.findall(stripret) \n if findret:\n formatstr = \" \".join(line.split())\n timeregx = r'\\d+\\.\\d{2}\\s*-\\s*\\d+\\.\\d{2}'\n time = re.compile(timeregx).findall(formatstr)[0].replace(\" \", \"\").replace(\":\", \".\")\n taskcontext = re.sub(timeregx, \"\", formatstr).strip().replace(\":\", \"\")\n return [taskcontext, time]\n else:\n # log it if line can't be parse\n logging.warning(\"unparsed line: [%r]\" % line)",
"def parse_timespan(timedef):\n\tif isinstance(timedef, int):\n\t\treturn timedef\n\tconverter_order = ('w', 'd', 'h', 'm', 's')\n\tconverters = {\n\t\t'w': 604800,\n\t\t'd': 86400,\n\t\t'h': 3600,\n\t\t'm': 60,\n\t\t's': 1\n\t}\n\ttimedef = timedef.lower()\n\tif timedef.isdigit():\n\t\treturn int(timedef)\n\telif len(timedef) == 0:\n\t\treturn 0\n\tseconds = -1\n\tfor spec in converter_order:\n\t\ttimedef = timedef.split(spec)\n\t\tif len(timedef) == 1:\n\t\t\ttimedef = timedef[0]\n\t\t\tcontinue\n\t\telif len(timedef) > 2 or not timedef[0].isdigit():\n\t\t\tseconds = -1\n\t\t\tbreak\n\t\tadjustment = converters[spec]\n\t\tseconds = max(seconds, 0)\n\t\tseconds += (int(timedef[0]) * adjustment)\n\t\ttimedef = timedef[1]\n\t\tif not len(timedef):\n\t\t\tbreak\n\tif seconds < 0:\n\t\traise ValueError('invalid time format')\n\treturn seconds",
"def parse_time(self, gc):\n\n def match(time_str):\n if time_str == \"Half\":\n time = 0\n minute = -3\n status = 'd'\n elif time_str == \"ET\":\n time = 0\n minute = -1\n status = 'd'\n elif time_str == \"Final\":\n time = 0\n minute = 90\n status = 'f'\n elif re.match(\".*[\\d]{2}:[\\d]{2} UK\", time_str):\n time = re.search(\".*([\\d]{2}):([\\d]{2}) UK\", time_str).groups()\n time = datetime.time(int(time[0]), int(time[1]))\n minute = 0\n status = 'o'\n elif re.match(\".*[\\d]{1,3}\\'\", time_str):\n time = 0\n minute = re.search(\"([\\d]{1,3})\\'\", time_str).groups()[0]\n status = 'd'\n elif re.match(\".*[\\d]{1,3} min\", time_str):\n time = 0\n minute = re.search(\"([\\d]{1,3}) min\", time_str).groups()[0]\n status = 'd'\n elif time_str == \"1st\":\n time = 0\n minute = -4\n status = 'd'\n elif time_str == \"2nd\":\n time = 0\n minute = -2\n status = 'd'\n else:\n time = 0\n minute = 0\n status = 'c'\n\n return time, minute, status\n\n # (o)pen / (s)tarted / (f)inished\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop_inGame'}).contents\n if type(t) == type([]) and len(t) > 0:\n return match(str(t[0]).strip())\n else:\n pass\n except AttributeError:\n pass\n\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop'}).a.contents\n if type(t) == type([]):\n return match(str(t[0]).strip())\n else:\n pass\n\n except AttributeError:\n pass\n\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop'}).contents\n if type(t) == type([]):\n if str(t[0]).strip() == \"Postp.\": # match postponed\n return 0, 0, 'p'\n else: # match cancelled or sth ;)\n return 0, 0, 'c'\n else:\n pass\n\n except AttributeError:\n pass\n\n return False, False, False",
"def __parse(self):\n lines = self.data.readlines()\n for i in range(0, len(lines)):\n line = lines[i]\n if line[0] == '#':\n continue\n tokens = line.split(\"\\t\")\n time_str = tokens[self.timecol]\n if time_str.find('start:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n elif time_str.find('end:') != -1:\n time_str = time_str.split()[1] + \" \" + time_str.split()[2]\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.calls.append((0, 0, 0))\n self.durations.append(0.0)\n break\n else:\n duration = float(tokens[6])\n fms = int(tokens[2])\n hfms = int(tokens[3])\n svs = int(tokens[4])\n self.calls.append((fms, hfms, svs))\n self.durations.append(duration)\n time = datetime.strptime(time_str, \"%Y-%m-%d %H:%M:%S\")\n self.times.append(time)\n self.length = (self.times[len(self.times) - 1] -\\\n self.times[0]).seconds",
"def _parse_time_metadata(self, data, kwargs):\n try:\n time = self._get_time_range(data)\n except KeyError:\n time = []\n try:\n time_steps = data.coords[self.time_field].size\n except KeyError:\n time_steps = kwargs.get('limit')\n return time, time_steps",
"def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)",
"def parse(str):\n if len(str) != 16:\n raise ValueError(\"Invalid time length %d\" % len(str))\n if (str[-1]) == 'R':\n return parse_relative_time(str)\n return parse_absolute_time(str)",
"def parse_entry(msg):\n values = msg.split(';')\n return {\n 'dt': datetime.strptime(\n values[0], '%Y-%m-%d %H:%M:%S.%f'),\n 'event': values[1]\n }",
"def timespan(self, timespan=None, timezone=None):\r\n url = '{0}/{1}'.format(self.get_pull_url(), 'timespan')\r\n params = base.get_params(('timespan', 'timezone'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json",
"def parseLineWTime ( tupl ):\n\n\ttry:\n\t\th1,m1 = tupl[0].split(\":\")\n\t\th2,m2 = tupl[2].split(\":\")\n\t\tactivity = tupl[3] \t\n\t\tnotes = tupl[4]\n\texcept IndexError:\n\t\treturn 0.0, \"unk\", []\n\n\tt1 = timedelta(hours=int(h1), minutes=int(m1))\n\tt2 = timedelta(hours=int(h2), minutes=int(m2))\n\n\tdelta = t2 - t1\n\n\treturn delta.seconds / (3600.0) , activity, notes",
"def _ParseStartTime(output: str) -> float:\n hosts = output['sysstat']['hosts']\n date = hosts[0]['date']\n time = hosts[0]['statistics'][0]['timestamp']\n # TODO(user): handle malformed json output from mpstat\n start_datetime_string = ' '.join([date, time])\n # As a sysstat utility, this is printed in UTC by default\n start_datetime = datetime.datetime.strptime(\n start_datetime_string,\n '%Y-%m-%d %H:%M:%S').replace(tzinfo=datetime.timezone.utc)\n return start_datetime.timestamp()",
"def parse_times(time_str):\n warnings = []\n days, interval = time_str.split(',')\n assert int(days) == float(days)\n days = int(days)\n assert int(interval) == float(interval)\n interval = int(interval)\n if interval < 3:\n warnings.append('Minimum interval is 3 hours')\n if days > 14:\n warnings.append('Maximum spot forecast period is 14 days')\n hours = np.arange(days * 24 + 1)[::interval]\n return hours.tolist(), warnings",
"def parse_time(text):\n\n # When keyword is 'in' adds values to time\n if text[-3] == 'in':\n remind_time = time.gmtime(int(text[-2]) * int(text[-1]) + time.time())\n # Otherwise try to parse time as written\n else:\n remind_time = text[-1].replace(':', ' ') \\\n + \" \" \\\n + time.strftime(\"%m/%d/%y\", time.gmtime(time.time()))\n remind_time = time.strptime(remind_time, \"%H %M %m/%d/%y\")\n return remind_time",
"def parse(s):\n\n t = AbsoluteTimer()\n t.id = s.get(\"id\", None)\n t.name = s.get(\"name\", None)\n \n if s.has_key(\"abstime\"):\n\n parts = s[\"abstime\"].split(\" \")\n\n if len(parts) != 2:\n raise RuntimeError, \"Invalid date format\"\n\n dateparts = parts[0].split(\"-\")\n timeparts = parts[1].split(\":\")\n \n if len(dateparts) != 3:\n raise RuntimeError, \"Invalid date format\"\n if len(timeparts) != 3:\n raise RuntimeError, \"Invalid date format\"\n\n t.year = int(dateparts[0])\n t.month = int(dateparts[1])\n t.date = int(dateparts[2])\n t.hours = int(timeparts[0])\n t.minutes = int(timeparts[1])\n t.seconds = int(timeparts[2])\n\n return t",
"def _parse_marathon_event_timestamp(timestamp):\n return datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S.%fZ\")",
"def read_manifest(manifest_path, max_duration=float('inf'), min_duration=0.0):\n manifest = []\n for json_line in codecs.open(manifest_path, 'r', 'utf-8'):\n try:\n json_data = json.loads(json_line)\n except Exception as e:\n raise IOError(\"Error reading manifest: %s\" % str(e))\n if (json_data[\"duration\"] <= max_duration and\n json_data[\"duration\"] >= min_duration):\n manifest.append(json_data)\n return manifest",
"def _parse_duration(path):\n tag = \"[FlowShaper] Application complete after \" # xxx ms\n found = None\n with (path / \"stdout.txt\").open(mode=\"r\") as stdout:\n found = [line for line in stdout if line.startswith(tag)][-1]\n assert found, f\"Run never completed! {path}\"\n\n # Parse the next word as an integer\n return int(found[len(tag):].split()[0])",
"def bufkit_parser_time_height(config, file_name, interval=1, start_dt=None, end_dt=None):\r\n # Open the file\r\n infile = open(file_name, 'r')\r\n\r\n profile = OrderedDict()\r\n\r\n # Find the block that contains the description of what everything is (header information)\r\n block_lines = []\r\n inblock = False\r\n block_found = False\r\n for line in infile:\r\n if line.startswith('PRES TMPC') and not block_found:\r\n # We've found the line that starts the header info\r\n inblock = True\r\n block_lines.append(line)\r\n elif inblock:\r\n # Keep appending lines until we start hitting numbers\r\n if re.match('^\\d{3}|^\\d{4}', line):\r\n inblock = False\r\n block_found = True\r\n else:\r\n block_lines.append(line)\r\n\r\n # Now compute the remaining number of variables\r\n re_string = ''\r\n for line in block_lines:\r\n dum_num = len(line.split())\r\n for n in range(dum_num):\r\n re_string = re_string + '(-?\\d{1,5}.\\d{2}) '\r\n re_string = re_string[:-1] # Get rid of the trailing space\r\n if line[-2] == '\\r': #Python 2 has a carriage return\r\n re_string = re_string + '\\r\\n'\r\n else: #Python 3 doesn't have one\r\n re_string = re_string + '\\n'\r\n\r\n # Compile this re_string for more efficient re searches\r\n block_expr = re.compile(re_string)\r\n\r\n # Now get corresponding indices of the variables we need\r\n full_line = ''\r\n for r in block_lines:\r\n if r[-2] == '\\r': #Python 2 has a carriage return\r\n full_line = full_line + r[:-2] + ' '\r\n else: #Python 3 doesn't have it\r\n full_line = full_line + r[:-1] + ' '\r\n # Now split it\r\n varlist = re.split('[ /]', full_line)\r\n # Get rid of trailing space\r\n varlist = varlist[:-1]\r\n\r\n # Variables we want\r\n vars_desired = ['TMPC', 'DWPC', 'UWND', 'VWND', 'HGHT']\r\n\r\n # Pressure levels to interpolate to\r\n plevs = [600, 750, 850, 925]\r\n plevs = [p for p in plevs if p <= float(config['lowest_p_level'])]\r\n\r\n # We now need to break everything up into a chunk for each\r\n # forecast date and time\r\n with open(file_name) as infile:\r\n blocks = infile.read().split('STID')\r\n for block in blocks:\r\n interp_plevs = []\r\n header = block\r\n if header.split()[0] != '=':\r\n continue\r\n fcst_time = re.search('TIME = (\\d{6}/\\d{4})', header).groups()[0]\r\n fcst_dt = datetime.strptime(fcst_time, '%y%m%d/%H%M')\r\n if start_dt is not None and fcst_dt < start_dt:\r\n continue\r\n if end_dt is not None and fcst_dt > end_dt:\r\n break\r\n if fcst_dt.hour % interval != 0:\r\n continue\r\n temp_vars = OrderedDict()\r\n for var in varlist:\r\n temp_vars[var] = []\r\n temp_vars['PRES'] = []\r\n for block_match in block_expr.finditer(block):\r\n vals = block_match.groups()\r\n for val, name in zip(vals, varlist):\r\n if float(val) == -9999.:\r\n temp_vars[name].append(np.nan)\r\n else:\r\n temp_vars[name].append(float(val))\r\n\r\n # Unfortunately, bufkit values aren't always uniformly distributed.\r\n final_vars = OrderedDict()\r\n cur_plevs = temp_vars['PRES']\r\n cur_plevs.reverse()\r\n for var in varlist[1:]:\r\n if var in (vars_desired + ['SKNT', 'DRCT']):\r\n values = temp_vars[var]\r\n values.reverse()\r\n interp_plevs = list(plevs)\r\n num_plevs = len(interp_plevs)\r\n f = interpolate.interp1d(cur_plevs, values, bounds_error=False)\r\n interp_vals = f(interp_plevs)\r\n interp_array = np.full((len(plevs)), np.nan)\r\n # Array almost certainly missing values at high pressures\r\n interp_array[:num_plevs] = interp_vals\r\n interp_vals = list(interp_array)\r\n interp_plevs = list(plevs) # use original array\r\n 
interp_vals.reverse()\r\n interp_plevs.reverse()\r\n if var == 'SKNT':\r\n wspd = np.array(interp_vals)\r\n if var == 'DRCT':\r\n wdir = np.array(interp_vals)\r\n if var in vars_desired:\r\n final_vars[var] = interp_vals\r\n final_vars['PRES'] = interp_plevs\r\n if 'UWND' not in final_vars.keys():\r\n final_vars['UWND'] = list(wspd * np.sin(wdir * np.pi/180. - np.pi))\r\n if 'VWND' not in final_vars.keys():\r\n final_vars['VWND'] = list(wspd * np.cos(wdir * np.pi/180. - np.pi))\r\n profile[fcst_dt] = final_vars\r\n\r\n return profile",
"def parse_game_session_start_and_end_times(log_data, frags):\n try:\n log_start_time = parse_log_start_time(log_data)\n\n # Get the match starting time\n start_match = search(\n r\"<(\\d{2}):(\\d{2})> [^d]* Loading level \\w+\\/\\w+, \\w+ \\w+\",\n log_data)\n start_time = log_start_time.replace(\n minute=int(start_match.group(1)), second=int(start_match.group(2)))\n\n # If the minute is smaller than the starting log's minute\n # -> increase 1 hour\n if start_time.minute < log_start_time.minute:\n start_time += timedelta(hours=1)\n\n # Get the match ending time\n # If frags is not empty\n if frags:\n last_frag = frags[-1]\n last_frag_time = last_frag[0].strftime(\"%M:%S\")\n end_match = findall(\n \"<{}> \".format(last_frag_time) +\n r\"<\\w+> [a-zA-Z0-9_ ]* killed \" +\n r\"(?:itself|[a-zA-Z0-9_ ]* with \\w+)\",\n log_data)\n\n # Get the index of the beginning of the line\n # after the last frag in log data\n i = log_data.index(end_match[-1]) + len(end_match[-1]) + 1\n end_time = last_frag[0].replace(\n minute=int(log_data[i+1:i+3]), second=int(log_data[i+4:i+6]))\n\n # If the minute is smaller than the ending frag's minute\n # -> increase 1 hour\n if end_time.minute < last_frag[0].minute:\n end_time += timedelta(hours=1)\n else:\n end_match = findall(r\"<(\\d+):(\\d+)>\", log_data)\n end_time = start_time.replace(\n minute=int(end_match[-1][0]), second=int(end_match[-1][1]))\n\n if end_time.minute < start_time.minute:\n end_time += timedelta(hours=1)\n\n return (start_time.isoformat(), end_time.isoformat())\n\n except Exception:\n print(\"Something is wrong with the log file!\")",
"def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)",
"def parse(s):\n\n rise = False\n set = False\n if s[-1:] == \"R\":\n rise = True\n s = s[:-1]\n elif s[-1:] == \"T\":\n set = True\n s = s[:-1]\n \n x = s.split(\":\")\n if len(x) == 1:\n x.append(\"0\")\n if len(x) == 2:\n x.append(\"0\")\n \n return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,\n after_sunset=set)",
"def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))",
"def test_case(self):\n expected = dict(seconds=1)\n self.assertEqual(expected, util.parse_relative_time_string(\"+1s\"))\n self.assertEqual(expected, util.parse_relative_time_string(\"+1S\"))",
"def decode_time_stamps(time_stamp, website):\n if website in {\"Zeit\", \"NOZ\", \"Welt\"}:\n current_time = datetime.now()\n minutes_which_have_passed = re.findall(\n \"[a-zA-Z]+\\s([0-9]+)\\sMinute\", time_stamp)\n hours_which_have_passed = re.findall(\n \"[a-zA-Z]+\\s([0-9]+)\\sStunde\", time_stamp)\n seconds_which_have_passed = re.findall(\n \"[a-zA-Z]+\\s([0-9]+)\\sSekunde\", time_stamp)\n months_which_have_passed = re.findall(\n \"[a-zA-Z]+\\s([0-9]+)\\sMonat\", time_stamp)\n days_which_have_passed = re.findall(\n \"[a-zA-Z]+\\s([0-9]+)\\sTag\", time_stamp)\n if minutes_which_have_passed:\n timedelta_difference = timedelta(minutes=int(\"\".join(\n minutes_which_have_passed)))\n elif hours_which_have_passed:\n timedelta_difference = timedelta(hours=int(\"\".join(\n hours_which_have_passed)))\n elif seconds_which_have_passed:\n timedelta_difference = timedelta(seconds=int(\"\".join(\n seconds_which_have_passed)))\n elif months_which_have_passed:\n timedelta_difference = timedelta(weeks=int(\"\".join(\n months_which_have_passed*4)))\n elif days_which_have_passed:\n timedelta_difference = timedelta(days=int(\"\".join(\n days_which_have_passed)))\n else:\n raise ValueError(\"This seems to be an improper time format: \"\n + time_stamp)\n correct_time = current_time - timedelta_difference\n return correct_time.replace(microsecond=0).isoformat()\n elif website == \"Handelsblatt\":\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp, \"%Y-%m-%dT%H:%M:%S%z\")))\n return correct_time.isoformat()\n elif website == \"TAZ\":\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp[:-6] + \"+0200\",\n \"%Y-%m-%dT%H:%M:%S%z\")))\n return correct_time.replace(microsecond=0).isoformat()\n elif website == \"RP\":\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp[:-6] + \"+0200\",\n \"%Y-%m-%dT%H:%M%z\")))\n return correct_time.replace(microsecond=0).isoformat()\n elif website == \"TZ\" or website == \"Merkur\" or website == \"FR\":\n # set locale to recognise German terms\n locale.setlocale(locale.LC_TIME, \"de_DE\")\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp, \"%A, %d. 
%B %Y %H:%M Uhr\")))\n # reset locale, for whatever purpose\n locale.resetlocale()\n return correct_time.replace(microsecond=0).isoformat()\n elif website in {\"Spiegel\", \"TAZ\"}:\n minutes_today = re.findall(\n \"heute,\\s[0-9][0-9]:([0-9][0-9])\", time_stamp)\n hours_today = re.findall(\n \"heute,\\s([0-9][0-9]):[0-9][0-9]\", time_stamp)\n minutes_yesterday = re.findall(\n \"gestern,\\s[0-9][0-9]:([0-9][0-9])\", time_stamp)\n hours_yesterday = re.findall(\n \"gestern,\\s([0-9][0-9]):[0-9][0-9]\", time_stamp)\n previous_date = re.findall(\n \"([0-9][0-9].[0-9][0-9].[0-9][0-9])\", time_stamp)\n if minutes_today:\n correct_time = datetime.now()\n correct_time = correct_time.replace(\n hour=int(\"\".join(hours_today)),\n minute=int(\"\".join(minutes_today)))\n elif minutes_yesterday:\n correct_time = datetime.now()\n correct_time = correct_time.replace(\n hour=int(\"\".join(hours_yesterday)),\n minute=int(\"\".join(minutes_yesterday)))\n correct_time = correct_time - timedelta(days=1)\n elif previous_date:\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(\"\".join(previous_date) + \"T00:01\",\n \"%d.%m.%yT%H:%M\")))\n else:\n raise ValueError(\"This seems to be an improper time format :\"\n + time_stamp)\n return correct_time.replace(microsecond=0).isoformat()\n elif website == \"FAZ\":\n # strip of one whitespace character, one minus sign and\n # one whitespace character\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp[3:], \"%d.%m.%Y %H:%M\")))\n return correct_time.replace(microsecond=0).isoformat()\n elif website == \"NW\":\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp, \"%d.%m.%Y %H:%M\")))\n return correct_time.replace(microsecond=0).isoformat()\n elif website == \"TA\":\n correct_time = datetime.fromtimestamp(\n time.mktime(time.strptime(time_stamp, \"%d.%m.%Y - %H:%M\")))\n return correct_time.replace(microsecond=0).isoformat()\n else:\n raise ValueError(\"This website argument seems to be wrong :\"\n + website)",
"def test_parse_valid_time_range(self):\n from azure.servicefabric.models.time_range import (\n TimeRange\n )\n from azure.servicefabric.models.time_of_day import (\n TimeOfDay\n )\n\n res = sf_c.parse_time_range({\n 'StartTime': {\n 'Hour': 0,\n 'Minute': 0\n },\n 'EndTime': {\n 'Hour': 23,\n 'Minute': 59,\n }\n })\n\n self.assertIsInstance(res, TimeRange)\n\n self.assertIsInstance(res.start_time, TimeOfDay)\n self.assertEqual(res.start_time.hour, 0)\n self.assertEqual(res.start_time.minute, 0)\n\n self.assertIsInstance(res.end_time, TimeOfDay)\n self.assertEqual(res.end_time.hour, 23)\n self.assertEqual(res.end_time.minute, 59)",
"def time_formats(self) -> localedata.LocaleDataDict:\n return self._data['time_formats']",
"def test_parse_time_with_invalid_interval(self):\n now = datetime(2015, 2, 1, 0, 0, 0)\n self.assert_TPVE(parse_time, \"-0\", now)\n self.assert_TPVE(parse_time, \"-12\", now)\n self.assert_TPVE(parse_time, \"-12fortnights\", now)\n self.assert_TPVE(parse_time, \"-20150101\", now)",
"def parse_duration(duration):\n duration = str(duration).upper().strip()\n\n elements = ELEMENTS.copy()\n\n for pattern in (SIMPLE_DURATION, COMBINED_DURATION):\n if pattern.match(duration):\n found = pattern.match(duration).groupdict()\n del found['time']\n\n elements.update(dict((k, int(v or 0))\n for k, v\n in found.items()))\n\n return datetime.timedelta(days=(elements['days'] +\n _months_to_days(elements['months']) +\n _years_to_days(elements['years'])),\n hours=elements['hours'],\n minutes=elements['minutes'],\n seconds=elements['seconds']) \n \n return ParseError()",
"def parse_uptime(uptime_str):\n # import ipdb; ipdb.set_trace()\n\n if \"uptime is\" in uptime_str:\n # IOS/NX-OS\n uptime_str = uptime_str.split(\"uptime is\")[1]\n elif \"Uptime:\" in uptime_str:\n # Arista\n uptime_str = uptime_str.split(\"Uptime: \")[1]\n else:\n # Juniper - different text form\n # System booted: 2018-10-03 20:51:06 PDT (44w1d 01:59 ago)\n # pretend it just rebooted\n return 90\n\n # Initialize to zero\n (years, weeks, days, hours, minutes) = (0, 0, 0, 0, 0)\n\n uptime_str = uptime_str.strip()\n\n # Replace 'and' in Arista uptime with a comma so values get split appropriately\n uptime_str = uptime_str.replace(\"and\", \",\")\n\n time_list = uptime_str.split(\",\")\n #print(time_list)\n for element in time_list:\n if re.search(\"year\", element):\n years = int(element.split()[0])\n elif re.search(\"week\", element):\n weeks = int(element.split()[0])\n elif re.search(\"day\", element):\n days = int(element.split()[0])\n elif re.search(\"hour\", element):\n hours = int(element.split()[0])\n elif re.search(\"minute\", element):\n minutes = int(element.split()[0])\n\n uptime_sec = (\n (years * YEAR_SECONDS)\n + (weeks * WEEK_SECONDS)\n + (days * DAY_SECONDS)\n + (hours * 3600)\n + (minutes * 60)\n )\n return uptime_sec",
"def process_time_label(self, label):\n\n # \"HH:MM:SS\" has eight characters\n if len(label) != 8:\n raise ValueError(\"Label has invalid length (must be in HH-MM-SS format\")\n\n tokens = label.split('-')\n\n # Ensure tokens is a list of three values ('HH', 'MM', 'SS')\n if len(tokens) != 3 or not all(map(lambda x: len(x) == 2, tokens)):\n raise ValueError(\"Label be in HH-MM-SS format\")\n\n hours = int(tokens[0])\n minutes = int(tokens[1])\n seconds = int(tokens[2])\n\n if hours < 0 or hours > 23 or minutes < 0 or minutes > 59 or seconds < 0 or seconds > 59:\n raise ValueError(\"Label must be in HH-MM-SS format\")\n\n return hours, minutes, seconds"
]
| [
"0.6017276",
"0.595505",
"0.5945074",
"0.5825513",
"0.57538307",
"0.5595888",
"0.5492997",
"0.54822344",
"0.546899",
"0.5464472",
"0.5456662",
"0.5447526",
"0.5441685",
"0.54099345",
"0.538926",
"0.53401285",
"0.5325533",
"0.5311758",
"0.52765626",
"0.5264576",
"0.5249275",
"0.52467126",
"0.5236907",
"0.52357143",
"0.5203671",
"0.5186759",
"0.5178675",
"0.5163704",
"0.51606256",
"0.51546824"
]
| 0.6562235 | 0 |
Increments the current scope. Returns `True` if successful, otherwise `False`. | def next_scope(self) -> bool:
if self._scope_index + 1 >= len(self._scopes):
return False
self._scope_index += 1
self._current_scope = self._scopes[self._scope_index]
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prev_scope(self) -> bool:\n if self._scope_index - 1 < 0:\n return False\n self._scope_index -= 1\n self._current_scope = self._scopes[self._scope_index]\n return True",
"def isScopeActive(self, name):",
"def hasScope(self, name):",
"def enterScope(self, name):",
"def beginScope():",
"def scope_push(self) -> None:\n self.scope_stack.appendleft(defaultdict(lambda: 0))",
"def acquire(self):\n assert self.is_live == True, \"Can't acquire a non-live scope\"\n if self._acquired > 0:\n raise Exception('Can not reacquire scope')\n changes = {}\n global global_out\n global_out = self\n if self.parent is not None:\n returned = self.enter_scope_callback(self._global_context,\n self)\n if returned:\n changes = returned\n self._global_context.enter_scope(self, **changes)\n self._acquired = 1\n return self",
"def continue_next(self):\n\n self.scope_assign = {}\n self.scope_var_id = 0\n self.cont = True",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True",
"def has(self) -> bool:\n\n return self.scopefunc() in self.registry",
"def increment(self) -> global___Expression:",
"def is_incr(self, idx):\n return self.args[0].is_positive()",
"def dpp_scope_active():\n return _dpp_scope_active",
"def is_incr(self, idx):\n return False",
"def is_incr(self, idx):\n return False",
"def is_incr(self, idx):\n return False",
"def is_incr(self, idx) -> bool:\n return False",
"def backtrack(csp):\n\n # Base case\n if (is_complete(csp)):\n return True\n\n # Get first unassigned variable\n var = select_unassigned_variable(csp)\n\n # Iterate through domain\n for value in order_domain_values(csp, var):\n\n # Inference\n if is_consistent(csp, var, value):\n\n # Set rollback point\n csp.variables.begin_transaction()\n var.assign(value)\n\n # Explore this assignment\n if (inference(csp, var)):\n # GGWP\n if backtrack(csp):\n return True\n # Nope\n csp.variables.rollback()\n return False",
"def inc(self):\n self._value += 1",
"def increment(self):\r\n return self.add(1)",
"def incr_version_in_con_scope(self, con_scopestr: str, id: int, var_name: str):\n # NOTE: we should have added id to con_scope_to_highest_var_vers when we call\n # init_highest_var_vers_dict\n # if this does not happen, some logic has failed\n assert(id in self.con_scope_to_highest_var_vers[con_scopestr])\n self.con_scope_to_highest_var_vers[con_scopestr][id] += 1\n version = self.con_scope_to_highest_var_vers[con_scopestr][id]\n grfn_var = create_grfn_var(var_name, id, version, con_scopestr)\n fullid = build_fullid(var_name, id, version, con_scopestr)\n self.pipeline_state.store_grfn_var(fullid, grfn_var)",
"def _inc_counter(self) -> None:\n self._state_storage.increment_counter()",
"def increaseApproval(_spender: address, _addedValue: uint256) -> bool:\n\n self.allowed[msg.sender][_spender] += _addedValue\n log.Approval(msg.sender, _spender, self.allowed[msg.sender][_spender])\n return True",
"def addScope(name=None):\n try:\n global scopeSeq\n global currScope\n scopeSeq += 1\n lastScope = currScope\n currScope = scopeSeq\n scopeStack.append(currScope)\n scopeDict[currScope] = symbolTable(currScope)\n scopeDict[currScope].setParent(lastScope)\n if name is not None:\n if type(name) is list:\n scopeDict[lastScope].insert(name[1], 'func')\n scopeDict[lastScope].updateArgList(name[1], 'child', scopeDict[currScope])\n else:\n temp = currScope\n currScope = lastScope\n if checkId(name, '*'):\n pos = p.lexer.lexpos\n line = checkLineNo(pos,0)\n print(\"Name \" + name + \" already defined....\",line)\n return\n currScope = temp\n scopeDict[lastScope].insert(name, 'type'+name)\n scopeDict[lastScope].updateArgList(name, 'child', scopeDict[currScope])\n pass\n except Exception as e:\n print(\"WARNING:1:\"+str(e))\n return",
"def incr_cond(self):\n pass",
"def inc(self):\n with self.mutex:\n self.value += 1\n return self.value",
"def dpp_scope():\n global _dpp_scope_active\n prev_state = _dpp_scope_active\n _dpp_scope_active = True\n yield\n _dpp_scope_active = prev_state",
"def inc(self): # type: () -> bool\n try:\n self._idx, self._current = next(self._chars)\n\n return True\n except StopIteration:\n self._idx = len(self._src)\n self._current = TOMLChar(\"\\0\")\n\n return False",
"def _is_type_in_scope(self, name):"
]
| [
"0.6443153",
"0.6352644",
"0.600642",
"0.58440465",
"0.56883276",
"0.5544916",
"0.54464257",
"0.54252976",
"0.5288855",
"0.5240058",
"0.5224531",
"0.5200019",
"0.51939374",
"0.5191955",
"0.5179793",
"0.5179793",
"0.5179793",
"0.5151326",
"0.5147138",
"0.51400316",
"0.5100714",
"0.50766027",
"0.50260276",
"0.5009596",
"0.50073683",
"0.50001717",
"0.49915624",
"0.49827832",
"0.49778068",
"0.49469936"
]
| 0.7437988 | 0 |
Decrements the current scope. Returns `True` if successful, otherwise `False`. | def prev_scope(self) -> bool:
if self._scope_index - 1 < 0:
return False
self._scope_index -= 1
self._current_scope = self._scopes[self._scope_index]
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def leaveScope(self, name):",
"def endScope():",
"def deleteScope():\n global currScope\n scopeStack.pop()\n currScope = scopeStack[-1]",
"def scope_pop(self) -> None:\n self.scope_stack.popleft()",
"def next_scope(self) -> bool:\n if self._scope_index + 1 >= len(self._scopes):\n return False\n self._scope_index += 1\n self._current_scope = self._scopes[self._scope_index]\n return True",
"def release(self):\n assert self.is_live == True, \"Can't release a non-live scope\"\n if self._acquired == 0:\n raise Exception('Must first acquire scope, then release')\n elif self._acquired == 2:\n # Do nothing if already released\n return\n changes = {}\n global global_out\n global_out = self.parent\n if self.parent is not None:\n returned = self.exit_scope_callback(self._global_context,\n self)\n if returned:\n changes = returned\n self._global_context.exit_scope(self, **changes)\n self._acquired = 2",
"def exit_var_scope(self):\n # type: () -> Scope[expr.Var]\n\n return self.var_scopes.popleft()",
"def remove_scope(self, ):\n if self.AttributeNames.SCOPE in self.attrs:\n del self.attrs[self.AttributeNames.SCOPE]\n return self",
"def exit_type_param_scope(self):\n # type: () -> Scope[ty.TypeVar]\n\n return self.type_param_scopes.popleft()",
"def hasScope(self, name):",
"def isScopeActive(self, name):",
"def can_go_back(self):\n return self._pointer >= 1",
"def done(self, env):\n del env\n return False",
"def backtrack(csp):\n\n # Base case\n if (is_complete(csp)):\n return True\n\n # Get first unassigned variable\n var = select_unassigned_variable(csp)\n\n # Iterate through domain\n for value in order_domain_values(csp, var):\n\n # Inference\n if is_consistent(csp, var, value):\n\n # Set rollback point\n csp.variables.begin_transaction()\n var.assign(value)\n\n # Explore this assignment\n if (inference(csp, var)):\n # GGWP\n if backtrack(csp):\n return True\n # Nope\n csp.variables.rollback()\n return False",
"def decreasing(self):\n return not self.direction()",
"def closed(self) -> bool:\n return self._out_of_scope or self._consumed",
"def scope_reset(client, args):\n result = client.get_scope()\n if result.is_custom:\n print(\"Proxy is using a custom function to check scope. Cannot set context to scope.\")\n return\n client.context.set_query(result.filter)",
"def reward(self, env):\n del env\n return 1",
"def destroyed(self) -> bool:\n return self._ptr is None",
"def __exit__(self, exc_type, exc_val, exc_tb):\n self.delete()\n if exc_type:\n return False\n return True",
"def done(self):\n\t\tdef txn():\n\t\t\tother = db.get(self.key())\n\t\t\tif other and other.eta == self.eta:\n\t\t\t\tother.delete()\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\treturn db.run_in_transaction(txn)",
"def clear(self) -> None:\n\n try:\n del self.registry[self.scopefunc()]\n except KeyError:\n pass",
"def deactivate(self) -> bool:\n pass",
"def continue_next(self):\n\n self.scope_assign = {}\n self.scope_var_id = 0\n self.cont = True",
"def deQueue(self):\n if self.rear - self.front > 0:\n self.front += 1\n return True\n else:\n return False",
"def scope_delete(client, args):\n client.set_scope([])",
"def decrement(self):\r\n return self.add(-1)",
"def fusion_api_delete_scope(self, uri=None, api=None, headers=None):\n return self.scope.delete(uri, api, headers)",
"def undo(self) -> CompilerEnv:\n if not self.stack:\n return\n self.env.close()\n self.env = self.stack.pop()\n return self.env",
"def deQueue(self) -> bool:\n if self.count == 0:\n return False\n self.headIndex = (self.headIndex + 1) % self.capacity\n self.count -= 1\n return True"
]
| [
"0.66638553",
"0.64204556",
"0.63871336",
"0.6014307",
"0.601418",
"0.56921864",
"0.5668897",
"0.540422",
"0.52995574",
"0.5242012",
"0.5185722",
"0.51733506",
"0.51417696",
"0.51218563",
"0.5106407",
"0.5016107",
"0.5004228",
"0.49873295",
"0.49578768",
"0.4929177",
"0.48866877",
"0.48548713",
"0.48320466",
"0.48261246",
"0.47951433",
"0.47926018",
"0.47689268",
"0.476167",
"0.47562155",
"0.47499552"
]
| 0.6474312 | 1 |
Returns the actions to be executed when the `phrase` is said, or an empty list if the `phrase` isn't recognized. | def actions(self, phrase: str) -> list:
return self._current_scope.get(phrase, []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _complete_actions(self, text):\r\n return [ a + ' ' for a in self.vocab if a.startswith(text)]",
"def complete(self, text, state, line=None):\r\n if line is None: # line is only set in tests\r\n import readline\r\n line = readline.get_line_buffer()\r\n\r\n # take the last phrase from a line like \"stop foo; start bar\"\r\n phrase = line.split(';')[-1]\r\n\r\n matches = []\r\n # blank phrase completes to action list\r\n if not phrase.strip():\r\n matches = self._complete_actions(text)\r\n else:\r\n words = phrase.split()\r\n action = words[0]\r\n # incomplete action completes to action list\r\n if len(words) == 1 and not phrase.endswith(' '):\r\n matches = self._complete_actions(text)\r\n # actions that accept an action name\r\n elif action in ('help'):\r\n matches = self._complete_actions(text)\r\n # actions that accept a group name\r\n elif action in ('add', 'remove', 'update'):\r\n matches = self._complete_groups(text)\r\n # actions that accept a process name\r\n elif action in ('clear', 'fg', 'pid', 'restart', 'start',\r\n 'stop', 'status', 'tail'):\r\n matches = self._complete_processes(text)\r\n if len(matches) > state:\r\n return matches[state]",
"def actions(self, state):\n l = len(state)\n if (l == 0): return [self.query[0]]\n ret = []\n blank = state.count(' ')\n if (l - blank < len(self.query)):\n ret += [self.query[l - blank]]\n if (state[-1] != ' '): ret += ' '\n return ret",
"def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]",
"def get_actions(self):\n\n if self.description == exceptions.NotAvailableError:\n raise exceptions.NotAvailableError('Can\\'t get actions because a description for this service is'\n ' not available.')\n return list(self.actions.values())",
"def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices",
"def action(self):\n returns = []\n for command in self._commands:\n #try:\n returns.append(eval(command))\n #except: #TODO Shouldn't except without specifying a type or indicating what the error is\n # print \"Error: Could not execute rule action:\", command, str(self.device)\n \n self.calls += 1\n self.last_call_time = time.time()\n return returns",
"def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]",
"def get_actions(self):\n return []",
"def get_list_of_actions(self):\n return self.actions",
"def valid_actions(self) -> List[str]:\n return list(self.action_map().keys())",
"def getLegalActions(self, state):\n return self.actionFn(state)",
"def scan(self, message):\n words = message.split(\" \")\n for word in words:\n self.words.append(self.__check_word__(word))\n return self.words",
"def parse_command_to_actions(moving_command):\n regex = re.compile(r'[A-Z][0-9]*')\n return re.findall(regex, moving_command)",
"def get_actions(self, state: TState = None) -> Sequence[TAction]:\n pass",
"def get_action_list(program_name: str) -> str:\n short_name = int(program_name[8:])\n es = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n res = es.search(\n index='actions-index', \n params= {'size': 1}, \n body={\"query\": {\"match\": {'name' : short_name}}})\n for hit in res['hits']['hits']:\n return hit['_source']['actions']\n return \"\"",
"def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")",
"def actions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"actions\")",
"def getLegalActions(self):\n return ['BOT', 'SLD']",
"def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches",
"def getActions(self, state): \n util.raiseNotDefined()",
"def getLegalActions(self,state):\n return self.actionFn(state)",
"def phrase_list_transition():\n prs = [next_phrase(s) for s in word_lists.transition]\n return (Parse.first(prs) + next_word('that').possibly()).nil()",
"def get_action_list(self, basic_action=False):\n assert len(self.name), 'No character name {} found.'.format(self.name)\n lf2_char = Template(player_id=self.idx) if basic_action \\\n else globals()[self.name](player_id=self.idx)\n action_s = lf2_char.action_space()\n if basic_action:\n action_s.remove('run')\n return action_s",
"def test_user_grammar_actions():\n grammar = \"\"\"\n S: A B C;\n @nonterm_action\n C: A B;\n A: \"a\";\n @term_action\n B: \"b\";\n \"\"\"\n\n called = [False, False]\n\n def nonterm_action(_, __):\n called[0] = True\n\n def term_action(_, __):\n called[1] = True\n\n my_actions = {\n \"nonterm_action\": nonterm_action,\n \"term_action\": term_action,\n }\n\n g = Grammar.from_string(grammar)\n p = Parser(g, actions=my_actions)\n assert p.parse(\"a b a b\")\n assert all(called)",
"def action_list(self):\n already_visited, can_visit_list = self.check_hand()\n\n message = []\n\n for msg, hand in [('Visited', already_visited), ('Available', can_visit_list)]:\n bits = []\n\n for card in hand:\n h = Hero(card)\n rank = Hero.RANKS[h.client['rank']]\n\n # 10/J/Q/K/A\n bits.append(u'{0}{1}'.format(\n rank if h.client['rank'] == Hero.TEN else rank[0],\n Hero.FACE_SYMBOLS[h.client['race']]\n ))\n\n message.append(u'{0}={1}'.format(msg, ','.join(bits)))\n\n self.chat.send_message(EmrossWar.safe_text(', '.join(message)))",
"def get_action_meanings(self) -> list[str]:\n keys = ale_py.Action.__members__.values()\n values = ale_py.Action.__members__.keys()\n mapping = dict(zip(keys, values))\n return [mapping[action] for action in self._action_set]",
"def scan_dictionary_option(message):\n returnValue = []\n\n words = message.split(' ')\n for word in words:\n type = getType(word.lower())\n returnValue.append((type, word))\n\n return returnValue",
"def actions(self, state):\n words = get_words(state)\n derived = []\n for i in range (len(words)):\n wi = words[i]\n fills = self.possibleFills(wi)\n for f in fills:\n derived.append((f, i))\n return derived",
"def getAllHumanActions(self):\n return self.human_policy.actions"
]
| [
"0.60004616",
"0.57317823",
"0.56835073",
"0.5680508",
"0.5658361",
"0.56231767",
"0.55279064",
"0.54998857",
"0.5439864",
"0.5381623",
"0.53224576",
"0.53194034",
"0.5273951",
"0.5268868",
"0.5265452",
"0.5254681",
"0.522656",
"0.522656",
"0.5225555",
"0.52229613",
"0.5211053",
"0.52042794",
"0.52039695",
"0.5201401",
"0.5184221",
"0.5180577",
"0.5171625",
"0.51599795",
"0.51352966",
"0.51263154"
]
| 0.78682524 | 0 |
Return the possible phrases that can be said in the current scope. | def phrases(self) -> set:
return set(phrase for phrase in self._current_scope.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lookup_pronunciations_for_phrase(words: Sequence[Text]) -> Sequence[Phrase]:\n return EnglishUtils.all_possible_phrases_for(words)",
"async def phrases(self, ctx):\n settings = await self.fetch_settings(ctx)\n cats, cats_remain = self.format_list(settings['cattriggers'])\n dogs, dogs_remain = self.format_list(settings['dogtriggers'])\n\n if not cats and not dogs:\n return await ctx.send(f'😿 I have no phrases to respond to on **{ctx.guild.name}**!')\n\n if ctx.channel.type == discord.ChannelType.text:\n message = f'🐱 Here are the phrases used to summon me on **{ctx.guild.name}**:\\n'\n else:\n message = f'🐱 Here are the phrases used to summon me in DMs:\\n'\n\n if cats:\n message += f'\\nCats: **{cats}**'\n if cats_remain:\n message += f' and **{cats_remain}** more...'\n if not cats and cats_remain:\n message += f'\\nCats: **{cats_remain}** phrase{\"s\" if cats_remain != 1 else \"\"} that {\"are\" if cats_remain != 1 else \"is\"} too long to fit here!'\n \n if dogs:\n message += f'\\nDogs: **{dogs}**'\n if dogs_remain:\n message += f' and **{dogs_remain}** more...'\n if not dogs and dogs_remain:\n message += f'\\nDogs: **{dogs_remain}** phrase{\"s\" if dogs_remain != 1 else \"\"} that {\"are\" if dogs_remain != 1 else \"is\"} too long to fit here!'\n \n if settings['require_mention']:\n message += f'\\n\\nYou need to @mention me for me to respond on **{ctx.guild.name}**!'\n await ctx.send(message)",
"def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]",
"def actions(self, phrase: str) -> list:\n return self._current_scope.get(phrase, [])",
"def lemmatized_phrases(self):\n phrases = [set(lower_words(TextBlob(p).words.lemmatize()))\n for p in self.blob.noun_phrases]\n return [' '.join(p) for p in phrases if not STOPWORDS.intersection(p)]",
"def get_words():\n\tprompts = []\n\tfor prompt in story.prompts:\n\t\tprompts.append(prompt.replace('_', ' '))\n\n\treturn render_template(\"get-words.html\", prompts = prompts, key_prompts = story.prompts, num_of_prompts = len(prompts))",
"def check_banned_phrases(self, text):\n if re.search(r'\\bfrom +to +where\\b', text, re.IGNORECASE):\n return 'from to where'\n if re.search(r'\\bwhere +where\\b', text, re.IGNORECASE):\n return 'where where'\n return ''",
"def allPossibleWords(Rack):\n def checkWord(word):\n return stringInRack(word,Rack)\n return filter(checkWord, Dictionary)",
"def hot_phrases(self):\r\n return self._get('hot_phrases', {})",
"def get_custom_phrases():\n return [x[0] for x in all_topics if x[2] == \"1\"]",
"def parser_words(self):\n words = self.query_no_accent.split()\n query_words = []\n for word in words:\n if (word not in STOP_WORDS and word not in QUESTION_WORDS):\n query_words.append(word)\n query_words = ' '.join(query_words)\n return query_words",
"def possession_ques(analysis):\n\n #processing as statement\n phrase = statement(analysis)\n\n #We have to know if it is plural or singular\n if other_functions.plural_noun(analysis.sn) == 1:\n return ['whose'] + phrase[:len(phrase) - 1] + ['these'] + ['?']\n else:\n return ['whose'] + phrase[1:len(phrase) - 1] + ['this'] + ['?']",
"def words(self) -> List[str]:\n return pulumi.get(self, \"words\")",
"def words(self) -> List[str]:\n return pulumi.get(self, \"words\")",
"def bursting_phrases(self):\r\n return self._get('bursting_phrases', {})",
"def _complete_actions(self, text):\r\n return [ a + ' ' for a in self.vocab if a.startswith(text)]",
"def target_words(self) -> List[str]:\n return list(map(\n lambda w: self.spaces[w.lower()] \n if w.lower() in self.spaces else w.lower(), \n self.keywords\n ))",
"def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]",
"def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches",
"def words(self) -> List[str]:\n return list(self.solutions)",
"def interestingWords(self):\n words = set([])\n for token in self.importantTokenList():\n if token.isStopWord() == False:\n words.add(token.text.lower())\n return words",
"def candidate_keywords(self):\n candidate_phrases = list()\n for sentence in self.sentences():\n candidate_sentence = re.sub(self.stopwords_regex(), '|', sentence)\n phrases = candidate_sentence.split(\"|\")\n for phrase in phrases:\n phrase = remove_white_spaces(phrase).lower()\n if phrase:\n candidate_phrases.append(phrase)\n return candidate_phrases",
"def global_matches(self, text):\n import keyword\n matches = []\n n = len(text)\n for word in keyword.kwlist:\n if word[:n] == text:\n matches.append(word)\n for nspace in [builtins.__dict__, self.namespace]:\n for word, val in nspace.items():\n if word[:n] == text and word != \"__builtins__\":\n matches.append(self._callable_postfix(val, word))\n return matches",
"def lookup_pronunciations_for_word(word: Text) -> Sequence[Word]:\n return EnglishUtils.all_possible_forms_for(word)",
"def getConstantSentenceForms(self):",
"def known(words):\n return [w for w in words if w in tokenizer.vocab] #change vocab file?",
"def get_verbs(self) -> Set[str]:",
"def create_phrases(self):\n list_of_phrases = []\n for phrase in self.POTENTIAL_PHRASES:\n list_of_phrases.append(Phrase(phrase))\n return list_of_phrases",
"def words(self):\n punctuation = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n lst = []\n for lines in self.lines:\n words = lines.split(' ')\n for word in words:\n no_punc = ''\n for c in word:\n if c not in punctuation:\n no_punc += c.lower()\n if no_punc != '' and no_punc != '\\n':\n lst.append(no_punc.strip('\\n'))\n return lst\n #no_punc += word.lower()\n #for word in no_punc.split(' ')[:-1]:\n #for word in no_punc:\n # lst.append(word)\n #line = lines.strip(os.linesep) # strips away spaces, \\t (tabs), and \\n (new-lines/enter)\n #print(no_punc)\n #print(lst)",
"def getwords(self):\n return self._aggroWords.keys()"
]
| [
"0.650457",
"0.6290294",
"0.6185158",
"0.6177416",
"0.61262715",
"0.6069863",
"0.599309",
"0.59742767",
"0.59522754",
"0.5952065",
"0.59339577",
"0.58751494",
"0.5860007",
"0.5860007",
"0.5824749",
"0.5822537",
"0.58140486",
"0.58132386",
"0.5788827",
"0.5752199",
"0.5703098",
"0.56960934",
"0.5694445",
"0.5684406",
"0.567999",
"0.56516576",
"0.5642059",
"0.5599118",
"0.5597554",
"0.5571527"
]
| 0.7400061 | 0 |
>>> rI.getRoadInformation("RandomRoad") should return 1,1 (1, 1) | def getRoadInformation(self,nameOfRoad):
exist = getattr(self,'_hashMap',None)
if (exist is not None) and (nameOfRoad in self._hashMap):
return self._hashMap[nameOfRoad]['lanes'],self._hashMap[nameOfRoad]['length']
else:
return 1,1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self) -> tuple:",
"def get_trips(self) -> Tuple[Trip]:\n ...",
"def simpleObjPickRoad(obj, roads):\n # in here the obj (either union or terrace) consists of one building\n fittestRid = -1\n accessPoint = Point(0, 0)\n\n findRoad = False\n\n for road in roads:\n reference = road.geom.project(obj.centroid)\n tempAccessPoint = road.geom.interpolate(reference)\n PointC = (obj.centroid.x, obj.centroid.y)\n PointD = (tempAccessPoint.x, tempAccessPoint.y)\n lineCD = LineString((PointC, PointD))\n\n if type(obj) == pg_read.Union:\n # now we are using a union\n uid = obj.id\n cur.execute(\"select * from unions \\\n where st_intersects(geom, st_geomfromtext('%s', 27700)) \\\n and uid != %d\" % (lineCD.wkt, uid)) # NOQA\n results = cur.fetchall()\n if not results:\n # which means no other unions intersects lineCD\n findRoad = True\n fittestRid = road.id\n accessPoint = tempAccessPoint\n break\n else:\n # which means we are using a terrace\n tid = obj.id\n cur.execute(\"select * from terraces \\\n where st_intersects(geom, st_geomfromtext('%s', 27700)) \\\n and tid != %d\" % (lineCD.wkt, tid)) # NOQA\n results = cur.fetchall()\n if not results:\n # which means no other terraces intersects lineCD\n findRoad = True\n fittestRid = road.id\n accessPoint = tempAccessPoint\n break\n\n if findRoad:\n # if findRoad == True:\n return fittestRid, accessPoint\n else:\n # which means findRoad == False\n # I know it's sad, but we need to have default option here\n # we use the roads[0] as accessRoad anyway\n road = roads[0]\n reference = road.geom.project(obj.centroid)\n fittestRid = road.id\n accessPoint = road.geom.interpolate(reference)\n return fittestRid, accessPoint",
"def getNeighbor(self, neighborID):",
"def address(self) -> tuple[str, int]:",
"def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y",
"def find_DPRIPs (roadline,windline):\r\n DPRIPs=[]\r\n ID = []\r\n i = 0 \r\n for w in windline:\r\n int_pt=[]\r\n for r in roadline:\r\n if w.intersection(r): \r\n int_pt.append(w.intersection(r))\r\n if len(int_pt)!=0: \r\n DPRIPs.append(int_pt)\r\n ID.append(1)\r\n else: \r\n ID.append(-999)\r\n i = i + 1\r\n \r\n source['road_access'] = ID\r\n #source with road access downiwnd \r\n swrad = source[source.road_access != -999]\r\n \r\n return swrad,DPRIPs",
"def get_info(atom):\n return [atom.GetIdx(), atom.GetNeighbors()[0].GetIdx()]",
"def test_relation_way_inserted():\n park = query_row(db_conf, 'osm_landusages', -8001)\n assert park['type'] == 'park'\n assert park['name'] == 'rel 8001'\n assert query_row(db_conf, 'osm_roads', 8009)[\"type\"] == 'residential'",
"def test_return_single_residue_info(self):\n\n # Ask for a few residues.\n res1 = mol_res_spin.return_single_residue_info('1')\n res2 = mol_res_spin.return_single_residue_info('2,Glu')\n res4 = mol_res_spin.return_single_residue_info('Pro,4')\n res5 = mol_res_spin.return_single_residue_info('-5')\n\n # Test the data of residue 1.\n self.assertEqual(res1, (1, None))\n\n # Test the data of residue 2.\n self.assertEqual(res2, (2, 'Glu'))\n\n # Test the data of residue 4.\n self.assertEqual(res4, (4, 'Pro'))\n\n # Test the data of the RNA residue -5.\n self.assertEqual(res5, (-5, None))",
"def get_pathway(identifier, organism):\n pass",
"def roadSegments(locations, API_key=\"Avah46_M-gfFeQ3P1w09Qq1ElAV9ZEHFDm9b8JRCRa8qPP5uVn21hDqAPVJgV4i_\"): \n \n # Base URL\n uri = 'http://dev.virtualearth.net/' # Resource URL \n path = 'REST/v1/Routes?'\n \n \n # URL Parameters\n params = { 'wayPoint.0' : locations[0]+',Singapore',\n 'wayPoint.1' : locations[1]+',Singapore',\n 'routeAttributes':'routePath',\n 'key' : API_Key} # by default 'optimize' : 'time'} # this is by default\n \n url = uri+path\n\n results = requests.get(\n url,\n params = params\n ).json()# ['resourceSets']\n\n # Retrieving values\n statusCode = results['statusCode']\n if statusCode == 200:\n # print(statusCode)\n\n # TODO review the exceptions and modify these basic exception handlings\n try:\n travelDistance = results['resourceSets'][0]['resources'][0]['travelDistance']\n except:\n travelDistance = 0\n try:\n travelDuration = results['resourceSets'][0]['resources'][0]['travelDuration']\n except:\n travelDuration = 0\n try:\n travelDurationTraffic = results['resourceSets'][0]['resources'][0]['travelDurationTraffic']\n except:\n travelDurationTraffic = 0\n\n try:\n numberSegments = len(results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems'])\n except:\n numberSegments = 0\n try:\n itineraryItems = results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems']\n except:\n itineraryItems = 'No items'\n\n pathCoord = results['resourceSets'][0]['resources'][0]['routePath']['line']['coordinates']\n\n roadName = []\n travelDistances = []\n travelDurations = []\n maneuverType = []\n\n for seg in itineraryItems:\n for i in range(len(seg['details'])):\n # print(i)\n try:\n roadName.append(seg['details'][i]['names'])\n except:\n roadName.append(0)\n try:\n travelDistances.append(seg['travelDistance'])\n except:\n travelDistances.append(0)\n\n try:\n travelDurations.append(seg['travelDuration'])\n except:\n travelDurations.append(0)\n try:\n maneuverType.append(seg['details'][i]['maneuverType'])\n except:\n maneuverType.append(0)\n\n\n return statusCode,travelDistance,travelDuration,travelDurationTraffic,numberSegments,roadName, \\\n travelDistances, travelDurations, maneuverType, pathCoord\n\n else:\n print(\"Unsuccessful route calculation.\")",
"def pickFittestRoad(obj, roads):\n fittestRid = -1\n accessPoint = Point(0, 0)\n\n PointA, PointB = objDiameter(obj)\n\n findRoad = False\n\n for road in roads:\n reference = road.geom.project(obj.centroid)\n tempAccessPoint = road.geom.interpolate(reference)\n PointC = (obj.centroid.x, obj.centroid.y)\n PointD = (tempAccessPoint.x, tempAccessPoint.y)\n deltaX = PointC[0] - PointA[0]\n deltaY = PointC[1] - PointA[1]\n PointE = (PointD[0] - deltaX, PointD[1] - deltaY)\n sideA = LineString((PointA, PointB)).length\n sideB = LineString((PointA, PointE)).length\n sideC = LineString((PointB, PointE)).length\n\n angle = getAngle(sideA, sideB, sideC)\n if angle > 90:\n angle = 180 - angle\n\n if angle > 30:\n # we think this angle is large enough\n # one more check, this straightLine CD had better not intersect another obj # NOQA\n lineCD = LineString((PointC, PointD))\n tid = obj.id\n cur.execute(\"select * from terraces \\\n where st_intersects(geom, st_geomfromtext('%s', 27700)) \\\n and tid != %d\" % (lineCD.wkt, tid)) # NOQA\n results = cur.fetchall()\n if not results:\n # which means no other terraces intersects lineCD\n findRoad = True\n fittestRid = road.id\n accessPoint = tempAccessPoint\n break\n\n if findRoad:\n return fittestRid, accessPoint\n\n else:\n # which means findRoad == False\n # we use the middle point of the roads[0] as access point\n terraceList[obj.id].projectType = 'special'\n road = roads[0]\n reference = road.geom.length * 0.5\n accessPoint = road.geom.interpolate(reference)\n fittestRid = road.id\n return fittestRid, accessPoint",
"def get(self) -> Tuple[str, Tuple]:\n # TODO:\n pass",
"def GetPoint1(self):\n ...",
"def GetPoint1(self):\n ...",
"def getInfo():",
"def get_details(self):",
"def GetPoint2(self):\n ...",
"def GetPoint2(self):\n ...",
"def _get_pieces(self, board):\r\n roads = list()\r\n settlements = list()\r\n cities = list()\r\n robber = None\r\n \r\n for (_, coord), piece in board.pieces.items():\r\n if piece.type == PieceType.road:\r\n roads.append((coord, piece))\r\n elif piece.type == PieceType.settlement:\r\n settlements.append((coord, piece))\r\n elif piece.type == PieceType.city:\r\n cities.append((coord, piece))\r\n elif piece.type == PieceType.robber:\r\n if robber is not None:\r\n logging.critical('More than one robber found on board, there can only be one robber')\r\n robber = (coord, piece)\r\n if robber is None:\r\n logging.critical('No robber found on the board, this is probably wrong')\r\n return roads, settlements, cities, robber",
"def get_data( obj, prm, lev, date, timelevel=0 ):\n \n parameter = obj( name = prm, level = lev, dataDate = date )[ timelevel ]\n print( parameter.dataDate )\n \n #-----Checking grit type----------------------------------------------\n if parameter.gridType == \"sh\":\n lat, lon, data = sh( parameter.values )\n elif parameter.gridType == \"reduced_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n elif parameter.gridType == \"regular_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n else: \n print ( parameter.gridType )\n \n return lat, lon, data",
"def get_coord(tic):\n try:\n catalog_data = Catalogs.query_object(objectname=\"TIC\"+tic, catalog=\"TIC\")\n ra = catalog_data[0][\"ra\"]\n dec = catalog_data[0][\"dec\"]\n # print(catalog_data.keys())\n # print(catalog_data[0][\"GAIA\"])\n return ra, dec\n except:\n \tprint(\"ERROR: TIC not found in Simbad\")",
"def success_geo(lat,lng):\n return (lng,lat)",
"def route_info(g, journey):\n distance = 0\n cost = 0.00\n time = 0\n check = 0\n \n for i in range(0, len(journey) - 1):\n city_name = journey[i]\n city_next = journey[i + 1]\n code_city = g.convert[city_name] \n code_next = g.convert[city_next]\n \n for flight in g.city_dict[code_city].get_flights_out():\n if(flight[0] == code_next):\n distance = distance + flight[1]\n time = time + route_info_helper(g, code_city, code_next, flight[1])\n if(i < 7):\n cost = cost + (distance * (0.35 - (i * 0.05)))\n \n check = check + 1\n if((check + 1) == len(journey)):\n return distance, cost, time\n else:\n print(\"Invalid Route\")\n return 0, 0, 0",
"def get_station_boroughs(self):\\",
"def test_residential_to_secondary2():\n assert query_row(db_conf, 'osm_roads', 40001)['type'] == 'secondary'\n assert query_row(db_conf, 'osm_roads_gen0', 40001)['type'] == 'secondary'\n assert query_row(db_conf, 'osm_roads_gen1', 40001)['type'] == 'secondary'",
"def genotype(rsid):\n if rsid[0] == 'I' or rsid[0] == 'i':\n return { 'error': 'Cannot find indicators, must use rs #s'}\n soup = BeautifulSoup(urllib.urlopen('http://snpedia.com/index.php/Special:Browse/' + rsid).read())\n trows = soup('table')[1].find_all('tr')\n if len(trows) < 2:\n return { 'error': 'That rsid does not have any data/does not exist.' }\n locations = getLocations(soup)\n genotypeData = getData(locations, soup)\n genotypeData['rsid'] = rsid\n return genotypeData",
"def retrieve_data_tuple(self):\n return ((42,))",
"def test_get_triangle_tuple_all_int(self):\n triangle = (3, 2, 1)\n result = get_triangle_type(triangle)\n self.assertEqual(result, 'scalene')"
]
| [
"0.58014214",
"0.57424927",
"0.5625062",
"0.5350593",
"0.5331557",
"0.5284436",
"0.5245976",
"0.5243223",
"0.52362585",
"0.5231731",
"0.5218621",
"0.52178955",
"0.51694965",
"0.5164629",
"0.514382",
"0.514382",
"0.51375484",
"0.512408",
"0.510979",
"0.510979",
"0.5107514",
"0.5102465",
"0.50915074",
"0.5062896",
"0.5061611",
"0.5058112",
"0.50366944",
"0.50356674",
"0.50332874",
"0.50332844"
]
| 0.6431428 | 0 |
Check if folders for logs and DB exist; create the folders if they don't. | def check_if_dir_exists():
if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + ".." + os.sep + "logs"):
try:
os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + ".." + os.sep + "logs")
logger.debug("Dir for logs has been created")
except OSError:
logger.debug(f"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed")
if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + ".." + os.sep + "db"):
try:
os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + ".." + os.sep + "db")
logger.debug("Dir for DB has been created")
except OSError:
logger.debug(f"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)",
"def create_dirs():\n os.makedirs(ORIGINAL_LOG_DIR, exist_ok=True)",
"def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)",
"def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)",
"def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")",
"def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)",
"def _ensure_dirs(dirpath):\n if not os.path.isdir(dirpath):\n if os.path.exists(dirpath):\n err = \"log path ({}) exists but is not a directory\"\n raise ConfigError(err.format(dirpath))\n os.makedirs(dirpath, 0o777)",
"def make_dirs():\n global paths_made\n\n #Have we done this already? Then why are we trying to do it again?\n if paths_made:\n return\n\n #Make the dirs\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(datastream_dir, exist_ok=True)\n paths_made = True",
"def __init_log_folder():\n try:\n os.makedirs(Logger.__log_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e",
"def createDirs():\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/Serotyping_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/databases/VF_Database/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/xml/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/xml/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Uploads/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Uploads/')\n\n if not os.path.isdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/'):\n os.mkdir(SCRIPT_DIRECTORY + '../temp/Results/RGI/')",
"def create_dirs():\n run(\"mkdir -p %s\"%RUN_DIR)\n run(\"mkdir -p %s\"%LOG_DIR)",
"def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory",
"def _make_log_dir(self, path):\n\n try:\n os.makedirs('/'.join([self._logpath, path]))\n except OSError, e:\n # Return True if dir already exists\n if e.args[0] is 17:\n return\n\n # Some other error; raise exception\n raise e\n\n return",
"def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)",
"def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])",
"def create_folders():\n os.makedirs(GRID_DIR, exist_ok=True)",
"def ensure_dirs(cls, folder_path):\n try:\n cls.mkdirs(folder_path)\n except exceptions.PlotlyRequestError as e:\n if \"already exists\" in e.message:\n pass\n else:\n raise e",
"def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")",
"def setup_directories():\n run('mkdir -p %(path)s' % env)\n run('mkdir -p %(env_path)s' % env)\n run('mkdir -p %(log_path)s;' % env)\n sudo('chgrp -R www-data %(log_path)s; chmod -R g+w %(log_path)s;' % env)\n \n with settings(warn_only=True):\n run('ln -s %(log_path)s %(path)s/logs' % env)",
"def make_folders(self):\n\t\tfor name in self.folders:\n\t\t\tos.makedirs(self.path+\"/\"+name,exist_ok=True)",
"def create_app_folders(self):\n\t\tif not os.path.exists(self.TEMP_FOLDER):\n\t\t\tos.makedirs(self.TEMP_FOLDER)\n\t\tif not os.path.exists(self.SAVE_FOLDER):\n\t\t\tos.makedirs(self.SAVE_FOLDER)",
"def _create_folders(tmp_folder: str = None):\n if not os.path.exists(tmp_folder):\n os.makedirs(tmp_folder)\n logging.info(\"Created folder: %s\", tmp_folder)\n\n tmp_user_data = tmp_folder + \"/user-data\"\n if not os.path.exists(tmp_user_data):\n os.makedirs(tmp_user_data)\n logging.info(\"Created folder: %s\", tmp_user_data)\n\n tmp_data_path = tmp_folder + \"/data-path\"\n if not os.path.exists(tmp_data_path):\n os.makedirs(tmp_data_path)\n logging.info(\"Created folder: %s\", tmp_data_path)\n\n tmp_cache_dir = tmp_folder + \"/cache-dir\"\n if not os.path.exists(tmp_cache_dir):\n os.makedirs(tmp_cache_dir)\n logging.info(\"Created folder: %s\", tmp_cache_dir)",
"def check_if_upload_folders_exist():\r\n\timport os\r\n\tfrom django.conf import settings\r\n\r\n\tif not os.path.exists(settings.MEDIA_ROOT):\r\n\t\tos.makedirs(settings.MEDIA_ROOT)\r\n\t\tprint(\"Created folder \" + settings.MEDIA_ROOT)\r\n\r\n\tif not os.path.exists(settings.EMAIL_FILE_PATH):\r\n\t\tos.makedirs(settings.EMAIL_FILE_PATH)\r\n\t\tprint(\"Created folder \" + settings.EMAIL_FILE_PATH)",
"def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True",
"def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()",
"def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')",
"def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. Creating dir.\".format(path))\n os.mkdir(path)",
"def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))",
"def create_folders(self):\n for f in self.params['folder_names']:\n if not os.path.exists(f):\n print 'Creating folder:\\t%s' % f\n os.system(\"mkdir %s\" % (f))",
"def ensure_data_folder_existence() -> None:\n folder_name = params.DATA_FOLDER_NAME\n if not folder_name in os.listdir('.'):\n os.mkdir(folder_name)"
]
| [
"0.75340325",
"0.74847454",
"0.74525905",
"0.73823094",
"0.73680645",
"0.72598135",
"0.7199667",
"0.71511656",
"0.7090995",
"0.70324355",
"0.69800925",
"0.6928911",
"0.69100326",
"0.69078887",
"0.6882662",
"0.6876244",
"0.68343437",
"0.6815257",
"0.6797767",
"0.6752044",
"0.6745973",
"0.67385614",
"0.67093045",
"0.67021346",
"0.669965",
"0.6698968",
"0.6694782",
"0.6689013",
"0.6689013",
"0.66729295"
]
| 0.7939407 | 0 |
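A minimal pathlib-based sketch of the same check-and-create logic as check_if_dir_exists() above; BASE_DIR and logger here are illustrative stand-ins for __CURRENT_DIRECTORY and its module logger, not names taken from the original record.

    import logging
    from pathlib import Path

    logger = logging.getLogger(__name__)
    BASE_DIR = Path(__file__).resolve().parent

    def ensure_dirs() -> None:
        # mkdir(parents=True, exist_ok=True) folds the existence check and creation into one call
        for name in ("logs", "db"):
            target = BASE_DIR.parent / name
            try:
                target.mkdir(parents=True, exist_ok=True)
                logger.debug("Dir for %s is present", name)
            except OSError:
                logger.debug("Creation of the directory %s failed", target)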
Whether to allow the message given the current state of the guard | def allow(self, message):
if message.author.id == Guard.AUTHOR:
return True
if message.author.id in Guard.BANNED_USERS:
return False
if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message):
return False
if self.state == State.SUDO_ONLY and not Guard.allow_sudo(message):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_message(self, msg):\n pass",
"def collect_allowed(message):\n return True",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def can_make_action(self) -> bool:\n return not(self.has_pending_action or self.is_dead())",
"def validate_message(self, state_id, msg):\n pass",
"def can_recept(self, text, *args, **kwargs):\n # such slot always can recept (when message is not empty) because it consumes the message\n if text:\n return True\n else:\n return False",
"def valid_for_send(self, app):\n return (\n (self.to is not None) and\n (self.next_hop is not None) and\n (self.source is not None) and\n (self.command is not None) and\n (self.handler is not None) and\n (self.kind is not None) and\n (self.time_to_live is not None) and\n (self.time_to_live >= app.tick)\n )",
"def ccheck(self, msg):\r\n if msg.channel == self.channel or (msg.channel.is_private and self.ispm):\r\n return True\r\n return False",
"def is_ctrl_message(self):\n return self._id < 0",
"def can_act(self, **kwargs):\n return True",
"def allowSecretChat(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n return (self.secretChatAllowed or \\\n (self.productName == \"Terra-DMC\" and self.isBlue() and self.secretChatAllowed))",
"def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"async def locked(self):\n return not \"not\" in await self.ask(\"locked\")",
"def protected(_):\n return False # This protects nothing",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"async def control_checks(self, ctx):\n server_id = ctx.message.server.id\n requester = ctx.message.author\n #silently drop if not in voice\n if not self.in_voice(server_id):\n return False\n #refuse if user not in the same channel\n if not self.user_in_channel(server_id, requester):\n vcname = self.get_server_dict(server_id)['voice'].channel.name\n await ctx.bot.send_message(ctx.message.channel, \"You can't control me outside of {}.\".format(vcname))\n return False\n return True",
"def violated(self) -> bool:\n ...",
"def is_only_valid_allowed(self) -> bool:\n return self.get_allow_scope() is TxAllowScope.VALID",
"def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True",
"def check_state(self):\n pass",
"def test_user_is_sender_message_is_moderated(self):\n thread = self.create_thread()\n message = thread.first_message\n message.status = 'pending'\n message.save()\n self.assertTrue(message.visible_to_user(message.sender))",
"def CanHandle(self, message):\n return (isinstance(message, messages.ChannelMessage)\n and message.content.startswith(TRIGGER))",
"def allow_sudo(message):\n if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:\n return True\n if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:\n return True\n return False",
"def is_waiting_for_message(self):\r\n return self.waiting_for_message",
"def is_acceptable(self):\n\n return not self.created and self.signal_type == self.target_signal_type",
"def CanHandleMessage(self, status_message):\n if isinstance(\n status_message,\n (SeekAheadMessage, ProducerThreadMessage, MetadataMessage, FinalMessage,\n RetryableErrorMessage, PerformanceSummaryMessage)):\n return True\n return False"
]
| [
"0.6435458",
"0.64242923",
"0.6385886",
"0.6385886",
"0.6385886",
"0.6313028",
"0.62827015",
"0.62696564",
"0.62615937",
"0.6235407",
"0.6195627",
"0.6128726",
"0.6123504",
"0.6036772",
"0.6009013",
"0.6009013",
"0.6008502",
"0.6004612",
"0.5997457",
"0.59911567",
"0.59179616",
"0.5912572",
"0.59027284",
"0.58918846",
"0.5845972",
"0.5834275",
"0.5816964",
"0.5812274",
"0.5797963",
"0.57951415"
]
| 0.7359263 | 0 |
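A hedged usage sketch for the allow() guard above, assuming discord.py and a Guard class as shown in the record; handle_command is a placeholder defined here only so the sketch runs, not part of the original code.

    import discord

    client = discord.Client(intents=discord.Intents.default())
    guard = Guard()  # assumed to start in an open (non-restricted) state

    async def handle_command(message):
        await message.channel.send("ok")  # placeholder dispatcher

    @client.event
    async def on_message(message: discord.Message):
        if not guard.allow(message):
            return  # banned users and restricted states are dropped silently
        await handle_command(message)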
Returns whether, in the circumstances of the given message, a sudo action is allowed | def allow_sudo(message):
if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:
return True
if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_should_be_able_to_use_sudo(driver):\n assert \"lectured\" in sudo_results, str(sudo_results)",
"def allow(self, message):\n if message.author.id == Guard.AUTHOR:\n return True\n if message.author.id in Guard.BANNED_USERS:\n return False\n if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message):\n return False\n if self.state == State.SUDO_ONLY and not Guard.allow_sudo(message):\n return False\n return True",
"def _test_sudo(self) -> bool:\n self.debug('Check if sudo is necessary.', level=2)\n command = Command('whoami')\n user_output = self.guest.execute(command, silent=True)\n if user_output.stdout is None:\n raise tmt.utils.RunError(\n 'unexpected command output',\n command,\n 0,\n user_output.stdout,\n user_output.stderr)\n\n return user_output.stdout.strip() != 'root'",
"def verify_passwordless_sudo():\n\n args = [\"sudo\", \"-n\", \"/bin/true\"]\n\n proc = subprocess.Popen(args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n\n msg = proc.communicate()[0]\n\n if proc.returncode != 0:\n iotests.notrun('requires password-less sudo access: %s' % msg)",
"def has_perm(self, user, perm):\r\n #superuser has all rights\r\n if user.is_superuser:\r\n return True\r\n if perm in [OI_READ, OI_ANSWER]:\r\n if self.project:\r\n return self.project.has_perm(user, perm)\r\n else:\r\n return True\r\n else:\r\n return self.author == user #only author can modify the message\r",
"def check_sudo(self, uid: str) -> None:\n stdout, stderr = self.syscall(os.popen(\"which sudo\").read().strip(), \"-nu\", uid, \"-S\", \"true\", \"/bin/bash\")\n if stdout or stderr:\n raise GateException(\"Access denied to UID '{}' via sudo.\".format(uid))",
"def sudo(command):\n return Effect(Sudo(command=command))",
"def in_sudo_mode():\n if not 'SUDO_UID' in os.environ.keys():\n print(\"Try running this program with sudo.\")\n exit()",
"def test_sudo(self):\n self.assertEqual(self.host.user().name, \"matlab\")\n self.assertTrue(self.host.run(\"sudo echo 'Hello World'\").succeeded)",
"def check_sudo_rules(self):\n result = ([], False)\n sfile = '/etc/sudoers'\n fm = FileManager(sfile)\n\n if fm.file.is_readable:\n result = fm.parse_sudoers(sfile)\n else:\n result = SudoList().parse()\n \n return 'sudo_rules', result",
"def test_edit_user_enable_permit_sudo(driver):\n pass",
"def sudo():\n try:\n run('sudo whoami')\n return 'sudo'\n except:\n return ''",
"def sudosu(self, password=\"\"):\n self._channel.send(\"sudo su -\\n\")\n xpass = re.compile(r'^.*password for .+:\\s*$', re.I|re.M)\n xprompt = re.compile(r'[\\>\\]$#] ?$', re.I|re.M)\n xfail = re.compile(r'incorrect password attempts', re.I|re.M)\n passok = True\n buf = \"\"\n while True:\n ibuf = self._channel.recv(65536)\n buf += ibuf\n if xpass.search(ibuf):\n time.sleep(0.25)\n self._channel.sendall(password + \"\\n\")\n elif xfail.search(ibuf):\n passok = False\n\n # check for prompt\n if xprompt.search(ibuf):\n break\n\n return passok",
"def can(user, action):\n\n v = bitvector.BitVector(user.access_level)\n return v.is_set(EVERYTHING) or v.is_set(action)",
"def is_user_message(message):\n return (message.get('message') and\n message['message'].get('text') and\n not message['message'].get(\"is_echo\"))",
"def is_trusted(message):\n author = message.author\n if author.id == Guard.AUTHOR:\n return True\n if author.id in Guard.BANNED_USERS:\n return False\n try:\n if set([role.name for role in author.roles]).intersection(Guard.TRUSTED_ROLES):\n return True\n except:\n return False\n return False",
"def collect_allowed(message):\n return True",
"def sudo(self, script, *args, **kwargs):\n return self._run('sudo', script, *args, **kwargs)",
"def _detect_sudo(self, _execnet=None):\n exc = _execnet or execnet\n gw = exc.makegateway(\n self._make_connection_string(self.hostname, use_sudo=False)\n )\n\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())'\n )\n\n result = channel.receive()\n gw.exit()\n\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True",
"def cant(user, action):\n\n return not can(user, action)",
"def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_privilege_escalation\")",
"def allow_privilege_escalation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_privilege_escalation\")",
"def check_if_help_message(message):\n return \"The commands are\" in message",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"def can_recept(self, text, *args, **kwargs):\n # such slot always can recept (when message is not empty) because it consumes the message\n if text:\n return True\n else:\n return False",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def OnPrivActionMessage(self, msg):\n self.handle_inbound_irc_msg(\"OnPrivActionMessage\", msg)\n return znc.CONTINUE",
"def can(self, unused_perm):\n return False",
"def verify_unmute_cli(node: CephAdmin, alert: str, **kwargs) -> bool:\n if not verify_alert(node=node, alert=alert, **kwargs):\n log.error(f\"Unable to unmute the alert : {alert}\")\n return False\n\n if not unmute_health_alert(alert=alert, node=node):\n log.error(f\"Unable to unmute the alert : {alert}\")\n return False\n\n log.info(f\"The alert : {alert} is un-muted. Scenario Pass\")\n generate_health_alert(alert=alert, node=node, clear=True, **kwargs)\n return True"
]
| [
"0.68108046",
"0.6810564",
"0.65219665",
"0.6107883",
"0.6106064",
"0.59634566",
"0.5876366",
"0.58678156",
"0.58661014",
"0.5843343",
"0.5835886",
"0.5754817",
"0.5732411",
"0.57277626",
"0.56578225",
"0.56416506",
"0.56124586",
"0.56103116",
"0.559504",
"0.5545683",
"0.5458358",
"0.5458358",
"0.54502517",
"0.5426484",
"0.54231054",
"0.5422713",
"0.5422713",
"0.54213923",
"0.54026115",
"0.5400806"
]
| 0.7590361 | 0 |
Returns whether, given the circumstances of the message, the author is trusted | def is_trusted(message):
author = message.author
if author.id == Guard.AUTHOR:
return True
if author.id in Guard.BANNED_USERS:
return False
try:
if set([role.name for role in author.roles]).intersection(Guard.TRUSTED_ROLES):
return True
except:
return False
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allow(self, message):\n if message.author.id == Guard.AUTHOR:\n return True\n if message.author.id in Guard.BANNED_USERS:\n return False\n if self.state == State.TRUSTED_ONLY and not Guard.is_trusted(message):\n return False\n if self.state == State.SUDO_ONLY and not Guard.allow_sudo(message):\n return False\n return True",
"def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text",
"def _is_acceptable_for_signing(self, tx_message: TransactionMessage) -> bool:\n result = (\n (self._is_valid_message(tx_message) or self._is_valid_tx(tx_message))\n and self._is_utility_enhancing(tx_message)\n and self._is_affordable(tx_message)\n )\n return result",
"def IsApplicationTrustedToRun(self) -> bool:",
"def hasVeryTrustedValue(self):\n return self.subnode_source.hasVeryTrustedValue()",
"def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False",
"def allow_sudo(message):\n if message.author.id == Guard.AUTHOR and message.channel.type == discord.ChannelType.private:\n return True\n if message.author.id in Guard.SUDO_IDS and message.channel.id in Guard.SUDO_CHANNELS:\n return True\n return False",
"def collect_allowed(message):\n return True",
"def IsTriggeredBy(self, eventObject):\n if not eventObject.IsKindOf(acm.FSettlement):\n return False\n\n settlement = eventObject\n \n if not _is_valid_sbl_security_loan(settlement):\n return False\n \n partial_return_identifier = settlement.Trade().Text1()\n if settlement.Status() != 'Authorised':\n return False\n \n if partial_return_identifier not in ['', None]:\n return False\n\n return True",
"def user_can_comment(user, document, privileged):\n if user.is_government_user and \\\n user.has_perm('DOCUMENTS_GOVERNMENT_REVIEW') and \\\n document.status.status in ['Received', 'Submitted']:\n return True\n\n if not user.is_government_user and not privileged and \\\n document.status.status in ['Draft', 'Submitted']:\n return True\n\n return False",
"def test_is_utility_enhancing(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=\"off_chain\",\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n self.decision_maker.ownership_state._quantities_by_good_id = None\n assert self.decision_maker._is_utility_enhancing(tx_message)",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return author_id == self.author.id\n return False",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"def test_transaction_is_affordable_there_is_no_wealth(self):\n currency_endowment = {\"FET\": 0}\n good_endowment = {\"good_id\": 0}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": 0},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n\n assert not self.ownership_state.is_affordable_transaction(\n tx_message=tx_message\n ), \"We must reject the transaction.\"",
"def authorize(self, action, author_id=None):\n if Identity.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False",
"def is_for_me(event):\n # check if not my own event\n\n type = event.get('type')\n\n if type and type == 'message' and not(event.get('user') == VALET_SLACK_ID):\n\n if is_private(event):\n return True\n text = event.get('text')\n # channel = event.get('channel')\n if type and type == 'message' and text.startswith(\"@td \"):\n return True\n if type and type == 'message' and text.startswith(\"@t\"):\n return True\n if type and type == 'message' and text.startswith(\"@cl\"):\n return True\n if valet_slack_mention in text.strip().split():\n return True",
"def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author",
"def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author",
"def is_special_message(self):\n if not self.is_valid():\n return False\n \n # TODO: what if the author is wrong? then these don't match at all!\n for nickname in AUTHOR_TO_NICKNAME[self.author]:\n \n if self.content == f\"{nickname} changed the chat theme.\":\n return True\n \n if self.content == f\"{nickname} joined the video chat.\":\n return True\n \n if self.content == f\"{nickname} joined the call.\":\n return True\n \n if self.content.startswith(f\"{nickname} named the group\"):\n return True\n \n if self.content == f\"{nickname} removed the group name.\":\n return True\n \n if self.content == f\"{nickname} sent a link.\":\n return True\n \n if self.content == f\"{nickname} sent an attachment.\":\n return True\n \n if self.content.startswith(f\"{nickname} set the emoji to\"):\n return True\n \n if self.content == f\"{nickname} changed the group photo.\":\n return True\n \n if is_add_remove_member(self.content, nickname):\n return True\n\n if is_set_nickname(self.content, nickname):\n return True\n \n if is_clear_nickname(self.content, nickname):\n return True\n \n if is_create_group(self.content, nickname):\n return True\n if self.content == f\"{nickname} started a video chat.\":\n return True\n \n if self.content == f\"{nickname} left the group.\":\n return True\n \n if is_poll_message(self.content, nickname):\n return True\n return False",
"def malicious(self):\n return self.probably_malicious",
"def is_authorised_representative(self):\n if not hasattr(self, '_is_authorised_representative'):\n self._is_authorised_representative = hasattr(self, 'authorised_representative')\n\n return self._is_authorised_representative",
"def authorized(self):\n return self.authorization is not None",
"def test_user_is_sender_message_is_moderated(self):\n thread = self.create_thread()\n message = thread.first_message\n message.status = 'pending'\n message.save()\n self.assertTrue(message.visible_to_user(message.sender))",
"def tests_transaction_is_affordable_agent_is_the_seller(self):\n currency_endowment = {\"FET\": 0}\n good_endowment = {\"good_id\": 0}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": 10},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n\n assert self.ownership_state.is_affordable_transaction(\n tx_message=tx_message\n ), \"We must reject the transaction.\"",
"def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False",
"def authorize(self, action, author_id=None):\n if Serializable.authorize(self, action, author_id=author_id):\n return (self.id == author_id)\n return False",
"def is_eligible(self, myself):\n if self.author().screen_name == myself.screen_name:\n log_.debug(\"Not replying to my own tweets\")\n return False\n if self.is_retweet():\n log_.debug(\"Not processing pure retweets\")\n return False\n return True",
"def authorize(self, action, author_id=None):\n return False",
"def whyNotLegal(self):\r\n return self._getLegalityStatus()[1]",
"def test_func(self):\n taxonomy = self.get_object()\n return self.request.user == taxonomy.author"
]
| [
"0.6596535",
"0.6152043",
"0.5797919",
"0.57831234",
"0.57756525",
"0.5773149",
"0.577266",
"0.57364255",
"0.5713972",
"0.57097185",
"0.56366843",
"0.5631748",
"0.5630205",
"0.5573819",
"0.55649304",
"0.55580103",
"0.5552814",
"0.5552814",
"0.5528097",
"0.5524117",
"0.5512337",
"0.5510707",
"0.55076545",
"0.55063266",
"0.5480969",
"0.5473733",
"0.5458579",
"0.5456786",
"0.5454773",
"0.54437983"
]
| 0.8007942 | 0 |
Returns whether we have the specified permission when replying to the message | def has_permission(message, permission):
if message.channel.type == discord.ChannelType.private:
return True
if getattr(message.channel.guild.me.permissions_in(message.channel), permission):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_perm(self, user, perm):\r\n #superuser has all rights\r\n if user.is_superuser:\r\n return True\r\n if perm in [OI_READ, OI_ANSWER]:\r\n if self.project:\r\n return self.project.has_perm(user, perm)\r\n else:\r\n return True\r\n else:\r\n return self.author == user #only author can modify the message\r",
"def has_permission(self, permission):\n return permission in self._permissions",
"def has_permission(self):\n return super().has_permission()",
"def has_permission(self, perm):\n return self.permissions & perm == perm",
"def can(self, perm):\n return self.role is not None and self.role.has_permission(perm)",
"def can(self, perm):\n return self.role.has_permissions(perm)",
"def checkRemotePerm(self, permission, robject):\n user = getSecurityManager().getUser()\n return user.has_permission(permission, robject.primaryAq())",
"def _has_permission(self, user, user_is_mod, command, db_session):\n\n if command[1] == 'for_all':\n return True\n if command[1] == 'for_mods' and user_is_mod:\n return True\n if type(command[1]) == db.Command:\n db_command = command[1]\n if bool(db_command.permissions) is False:\n return True\n elif user in [permission.user_entity for permission in db_command.permissions]:\n return True\n return False",
"def __has_permission(self, permission) -> bool:\n if self.__manager.is_enabled and not self.__auth_session:\n return False\n\n return self.hasPermission(permission, None)",
"def permit_required(self):\n return \"permission\" in self.description.lower()",
"def _is_user_defined_permission(self, perm: Model) -> bool:\n\n return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS",
"def can_message(guild, channel):\n\treturn authorized(guild, channel) and not muted(guild, channel)",
"def has_perm(self, user):\n return True",
"def check_permissions(permission, payload):\n if 'permissions' not in payload:\n abort(401)\n\n if permission not in payload['permissions']:\n abort(401)\n\n return True",
"def has_permission(self, request, view):\n return True",
"def has_permission(self, request, view):\n return False",
"def hasPermission(self, permission, extra_params):\n\n with DBSession(self.__config_db) as session:\n perm, params = ThriftAuthHandler.__create_permission_args(\n permission, extra_params, session)\n\n return require_permission(perm, params,\n self.__auth_session)",
"def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)",
"def _have_permission(self, user: discord.User, in_guild: discord.Guild) -> bool:\n guild = connector.getGuildByID(in_guild.id)\n\n return (guild.moderator_role_id in [role.id for role in user.roles]) or (in_guild.owner == user)",
"def has_permission(cls, perm, user):\n return perm in cls.get_permissions(user)",
"def has_permission(self, permission: ActionObjectPermission) -> bool:\n collection_permissions_status = self.permissions\n if collection_permissions_status.is_err():\n return False\n collection_permissions: MongoCollection = collection_permissions_status.ok()\n\n # TODO: fix for other admins\n if self.root_verify_key.verify == permission.credentials.verify:\n return True\n\n permissions: Optional[Dict] = collection_permissions.find_one(\n {\"_id\": permission.uid}\n )\n\n if permissions is None:\n return False\n\n if permission.permission_string in permissions[\"permissions\"]:\n return True\n\n # check ALL_READ permission\n if (\n permission.permission == ActionPermission.READ\n and ActionObjectPermission(\n permission.uid, ActionPermission.ALL_READ\n ).permission_string\n in permissions[\"permissions\"]\n ):\n return True\n\n return False",
"def can(self, permissions: Union[str, List]) -> bool:",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def IsAllowed(self):\r\n\r\n return self.notify.IsAllowed()",
"def current_user_has_permission(query: 'Query') -> bool:\n return acl.current_user_has_permission(data_set_acl_resources[query.data_set.id])",
"def can_access(\n self, permission: Union[str, Enum] = None, error: bool = False\n ) -> bool:\n access = (\n permission is None\n or self.belongs_to_user()\n or flask.g.user.has_permission(permission)\n )\n if error and not access:\n raise _403Exception\n return access",
"def has_perm(context, perm, obj):\n return access.has_perm(context['request'].user, perm, obj)",
"def has_perm(context, perm, obj):\n return access.has_perm(context['request'].user, perm, obj)",
"async def can_respond(self, recognized_message: RecognizedMessage, channel: str, **kwargs) -> bool:\r\n pass"
]
| [
"0.73359954",
"0.6755361",
"0.6672767",
"0.6643699",
"0.66324294",
"0.6602714",
"0.65812474",
"0.6541931",
"0.647505",
"0.6463046",
"0.6444851",
"0.64401436",
"0.6407568",
"0.63953",
"0.6367485",
"0.635122",
"0.62977666",
"0.6294286",
"0.6294286",
"0.6286682",
"0.6276007",
"0.62668204",
"0.626337",
"0.626337",
"0.626337",
"0.6252137",
"0.62483245",
"0.62265456",
"0.62265456",
"0.62255657"
]
| 0.7485159 | 0 |
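A small sketch of how has_permission() above can gate a reply; "send_messages" is the real discord.Permissions flag name, while safe_reply itself is illustrative only.

    async def safe_reply(message, text: str):
        # only attempt to send when the bot holds send_messages in this channel
        if has_permission(message, "send_messages"):
            await message.channel.send(text)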
Returns the intent of the message, as defined by the Intent class | def get_intent(msg):
if re.search(MapController.MAP_REGEX, msg.content) and client.user.id in msg.raw_mentions:
return Intent.MAP
elif re.match(Controller.KEY_REGEX, msg.content):
return Intent.DIRECT
else:
return Intent.NONE | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _intent(self) -> MessageIntent:\r\n pass",
"def handle(self, message: discord.Message, intent: Intent) -> Optional[str]:\n pass",
"def get_intent_action(self, intent_keyword):\n pass",
"def open_intent_envelope(message):\n intent_dict = message.data\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))",
"def get_intent(self, intent_name):\n for name, intent in self:\n if name == intent_name:\n return intent\n else:\n return None",
"def get_alexa_intent(data: dict) -> Union[str, None]:\n if \"request\" in data and \"intent\" in data[\"request\"] and \"name\" in data[\"request\"][\"intent\"]:\n return data[\"request\"][\"intent\"][\"name\"]\n else:\n return None",
"def process_intent(self, intent: Intent, game: Game):\n return intent",
"def get_dialogflow_intent(self, data: dict) -> Union[Any, None]:\n if \"result\" in data and \"action\" in data[\"result\"]:\n self.dialogflow_v = 1\n return data[\"result\"][\"action\"]\n elif \"queryResult\" in data and \"action\" in data[\"queryResult\"]:\n self.dialogflow_v = 2\n return data[\"queryResult\"][\"action\"]\n else:\n return None",
"def message(self):\n return self.args[0]",
"def get_intent(self, row_id):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"\n SELECT intent FROM queries WHERE rowid=(?);\n \"\"\", (row_id,))\n return cursor.fetchone()[0]",
"def on_intent(request, session):\n\n intent_name = request['intent']['name']\n\n # process the intents\n if intent_name == \"comenzar\":\n return get_fact_response()\n elif intent_name == \"otravez\":\n return get_fact_response()\n elif intent_name == \"AMAZON.YesIntent\":\n return get_fact_response()\n elif intent_name == \"AMAZON.NoIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_help_response()\n elif intent_name == \"AMAZON.StopIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.CancelIntent\":\n return get_stop_response()\n elif intent_name == \"AMAZON.FallbackIntent\":\n return get_fallback_response()\n else:\n print(\"invalid Intent reply with help\")\n return get_help_response()",
"def check_intent_attr(self, node, arg):\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n is_ptr = declarator.is_indirect()\n intent = attrs[\"intent\"]\n if intent is None:\n if node is None:\n # do not default intent for function pointers\n pass\n elif declarator.is_function_pointer():\n intent = \"in\"\n elif not is_ptr:\n intent = \"in\"\n elif arg.const:\n intent = \"in\"\n elif arg.typemap.sgroup == \"void\":\n # void *\n intent = \"in\" # XXX must coordinate with VALUE\n else:\n intent = \"inout\"\n # XXX - Do hidden arguments need intent?\n else:\n intent = intent.lower()\n if intent in [\"in\", \"out\", \"inout\"]:\n meta[\"intent\"] = intent\n else:\n raise RuntimeError(\"Bad value for intent: \" + attrs[\"intent\"])\n if not is_ptr and intent != \"in\":\n # Nonpointers can only be intent(in).\n raise RuntimeError(\"{}: Only pointer arguments may have intent attribute\".format(node.linenumber))\n meta[\"intent\"] = intent\n return intent",
"def get_intent(self, utterance, lang=\"en-us\"):\n msg = Message(\"intent.service.intent.get\",\n {\"utterance\": utterance, \"lang\": lang},\n context={\"destination\": \"intent_service\",\n \"source\": \"intent_api\"})\n resp = self.bus.wait_for_response(msg,\n 'intent.service.intent.reply',\n timeout=self.timeout)\n data = resp.data if resp is not None else {}\n if not data:\n LOG.error(\"Intent Service timed out!\")\n return None\n return data[\"intent\"]",
"def intent(self) -> typing.Tuple[str, ...]:\n return self._intent.members()",
"def message(self) -> AgentMessage:\n return self._message",
"def respond_to_intent(self, intent):\n if type(intent) is BARTQueryIntent:\n return self.respond_to_bart_intent(intent)\n elif type(intent) is BusQueryIntent: \n return self.respond_to_bus_intent(intent)\n else:\n return HelpIntent()",
"def get_intent(sentence):\n\n X = \" \".join(sentence)\n X_encoding = tokenizer.encode_plus(\n X, add_special_tokens=True, return_tensors='pt')\n X_ids = X_encoding['input_ids'].to(DEVICE)\n\n with torch.no_grad():\n output = bert_model(X_ids)\n\n ID_model.eval()\n ID_input = get_representation(output)[0]\n ID_output = ID_model(ID_input)\n result = torch.argmax(ID_output).item()\n\n if result == 0:\n return \"+\"\n if result == 1:\n return \"-\"",
"def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return get_help(intent, session)",
"def message(self) -> \"str\":\n return self._attrs.get(\"message\")",
"def message(self) -> \"str\":\n return self._attrs.get(\"message\")",
"def whoami( self, mess, args):\n return mess.getFrom()",
"def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"NumberFact\":\n return num_fact(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")",
"def get_message(self):\n return self.message",
"def get_message(self):\n return self.message",
"def message_for_activation(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"message_for_activation\")",
"def get_message(self):\n return self.msg",
"def on_intent(intent_request, session):\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n print(intent)\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"WhoIs\":\n intent_search = intent['slots']['ASN']['value']\n return whois(intent_search)\n elif intent_name == \"WherePeer\":\n intent_search = intent['slots']['company']['value']\n return wherePeer(intent_search)\n elif intent_name == \"WhoPeers\":\n intent_search = intent['slots']['IX']['value']\n return whoPeers(intent_search)\n elif intent_name == \"WhosAt\":\n intent_search = intent['slots']['facility']['value']\n return whosAt(intent_search)\n elif intent_name == \"RouteServers\":\n return routeServers()\n else:\n raise ValueError(\"Invalid intent\")",
"def on_intent(self):\n mcd = self._mcd\n print(\n self.LOG_CLASS,\n '[method: on_intent]',\n '[intent: ' + mcd.intent_name + ']',\n 'MyCityDataModel received:',\n mcd\n )\n\n # Check if the user is setting the address. This is special cased\n # since they may have been prompted for this info from another intent\n if mcd.intent_name == \"SetAddressIntent\":\n set_address_in_session(mcd)\n\n if intent_constants.ADDRESS_PROMPTED_FROM_INTENT \\\n in mcd.session_attributes:\n # User was prompted for address from another intent.\n # Set our current intent to be that original intent now that\n # we have set the address.\n mcd.intent_name = mcd.session_attributes[intent_constants.ADDRESS_PROMPTED_FROM_INTENT]\n print(\"Address set after calling another intent. Redirecting \"\n \"intent to {}\".format(mcd.intent_name))\n # Delete the session key indicating this intent was called\n # from another intent.\n del mcd.session_attributes[intent_constants.ADDRESS_PROMPTED_FROM_INTENT]\n else:\n return get_address_from_session(mcd)\n\n # session_attributes = session.get(\"attributes\", {})\n if mcd.intent_name == \"GetAddressIntent\":\n return get_address_from_session(mcd)\n elif mcd.intent_name == \"TrashDayIntent\":\n return request_user_address_response(mcd) \\\n if intent_constants.CURRENT_ADDRESS_KEY \\\n not in mcd.session_attributes \\\n else get_trash_day_info(mcd)\n elif mcd.intent_name == \"SnowParkingIntent\":\n return request_user_address_response(mcd) \\\n if intent_constants.CURRENT_ADDRESS_KEY \\\n not in mcd.session_attributes \\\n else get_snow_emergency_parking_intent(mcd)\n elif mcd.intent_name == \"GetAlertsIntent\":\n return get_alerts_intent(mcd)\n elif mcd.intent_name == \"AMAZON.HelpIntent\":\n return self.get_welcome_response()\n elif mcd.intent_name == \"AMAZON.StopIntent\" or \\\n mcd.intent_name == \"AMAZON.CancelIntent\":\n return self.handle_session_end_request()\n elif mcd.intent_name == \"UnhandledIntent\":\n return unhandled_intent(mcd)\n else:\n raise ValueError(\"Invalid intent\")",
"def _get_message(self):\n return self.__message",
"def on_intent(intent_request, session):\n\n\tprint(\"on_intent requestId=\" + intent_request['requestId'] +\n\t\t \", sessionId=\" + session['sessionId'])\n\n\tintent = intent_request['intent']\n\tintent_name = intent_request['intent']['name']\n\n\t# Sends the request to one of our intents\n\tif intent_name == \"sendVideoIntent\":\n\t\treturn sendVideo(intent, session)\n\telif intent_name == \"setVolumeIntent\":\n\t\treturn setVolume(intent, session)\n\telif intent_name == \"AMAZON.PauseIntent\":\n\t\treturn pauseVideo(intent, session)\n\telif intent_name == \"AMAZON.ResumeIntent\":\n\t\treturn resumeVideo(intent, session)\n\telif intent_name == \"AMAZON.HelpIntent\":\n\t\treturn get_welcome_response()\n\telif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n\t\treturn handle_session_end_request()\n\telse:\n\t\traise ValueError(\"Invalid intent\")"
]
| [
"0.7623119",
"0.6905368",
"0.6655187",
"0.65414625",
"0.63272935",
"0.61630195",
"0.61426914",
"0.6030031",
"0.5988024",
"0.5958836",
"0.5771847",
"0.56992733",
"0.5698487",
"0.561783",
"0.54841554",
"0.5475818",
"0.54735065",
"0.5444824",
"0.5429536",
"0.5429536",
"0.5413568",
"0.5390862",
"0.5369771",
"0.5369771",
"0.53373706",
"0.5294386",
"0.52731466",
"0.52626365",
"0.5251945",
"0.52506095"
]
| 0.72745353 | 1 |
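A dispatch sketch built on get_intent() above; the two controller objects are assumptions for illustration and do not come from the original record.

    async def dispatch(msg):
        intent = get_intent(msg)
        if intent is Intent.MAP:
            await map_controller.handle(msg)     # hypothetical map handler
        elif intent is Intent.DIRECT:
            await direct_controller.handle(msg)  # hypothetical direct-command handler
        # Intent.NONE: the message is ignored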
Decorate a function as requiring sudo access | def privileged(f):
@wraps(f)
def wrapper(self, msg, *args, **kwargs):
if not Guard.allow_sudo(msg):
return
return f(self, msg, *args, **kwargs)
return wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def elevate_priv_if_needed(func):\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OSError as e:\n logger.debug('Elevating privileges due to receiving permission errror')\n logger.debug(e)\n return run_as_root(func)(*args, **kwargs)\n\n return inner",
"def require_admin(f):\n\n @require_login\n @wraps(f)\n def wrapper(*args, **kwds):\n if not api.user.get_user().get(\"admin\", False):\n raise PicoException(\n \"You do not have permission to access this resource\", 403\n )\n return f(*args, **kwds)\n\n return wrapper",
"def requires_admin(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not users.is_current_user_admin():\n try:\n self.DenyAccess()\n except Exception, e:\n self.handle_exception(e, self.app.debug)\n finally:\n self.session_store.save_sessions(self.response)\n else:\n return f(self, *args, **kwargs)\n return wrapper",
"def test_edit_user_enable_permit_sudo(driver):\n pass",
"def dumba_non_public(function=None):\n actual_decorator = user_passes_test(lambda u: u.is_staff, login_url=None)\n if function:\n return actual_decorator(function)\n return actual_decorator",
"def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return abort(403)\r\n return decorated_function",
"def __wrapper(request, *args, **kwds):\n if request.user_is_admin:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be an administrator to view this page.')",
"def __wrapper(request, *args, **kwds):\n if request.profile.is_superuser:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be a superuser to view this page.')",
"def admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session['user']['user_type'] != \"admin\":\n return abort(403)\n return f(*args, **kwargs)\n return decorated_function",
"def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper",
"def requires_admin(method):\r\n def wrapper(self, *args, **kwargs):\r\n user = users.get_current_user()\r\n if not user:\r\n if web.ctx.method == \"GET\":\r\n raise web.seeother(users.create_login_url(web.ctx.fullpath))\r\n raise web.forbidden()\r\n elif not (users.is_current_user_admin()):\r\n raise web.forbidden()\r\n else:\r\n return method(self, *args, **kwargs)\r\n return wrapper",
"def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.auth_manager.has_privilege(privilege):\n return view_func(request, *args, **kwargs)\n else:\n raise InsufficientPrivilegesException(required_privileges=[privilege])\n\n return _wrapped_view",
"def admin_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if current_user.is_admin:\n return func(*args, **kwargs)\n else:\n return login_manager.unauthorized()\n\n return wrapper",
"def sudo(command):\n return Effect(Sudo(command=command))",
"def check_admin(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n if g.my['rank'] > 25:\r\n abort(401)\r\n return function_to_decorate(*args, **kwargs)\r\n return decorated_function",
"def require_privmsg():\n def add_attribute(func):\n if not hasattr(func, \"priv_msg\"):\n func.priv_msg = True\n return func\n return add_attribute",
"def require_admin(func: const.WebSocketCommandHandler) -> const.WebSocketCommandHandler:\n\n @wraps(func)\n def with_admin(\n hass: HomeAssistant, connection: ActiveConnection, msg: dict[str, Any]\n ) -> None:\n \"\"\"Check admin and call function.\"\"\"\n user = connection.user\n\n if user is None or not user.is_admin:\n raise Unauthorized()\n\n func(hass, connection, msg)\n\n return with_admin",
"def super_user_required(func):\n\n @functools.wraps(func)\n def __wrapper(request, *args, **kwds):\n \"\"\"Makes it possible for super_user_required to be used as a decorator.\"\"\"\n if request.profile.is_superuser:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be a superuser to view this page.')\n\n return __wrapper",
"def private(func):\n func._private_ = True\n return func",
"def guest_access(func):\n def decorated(*_, **kwargs):\n public_profiles = current_app.config['USER_PUBLIC_PROFILES']\n if not public_profiles:\n if not current_user.is_authenticated:\n abort(401)\n elif current_user.id != kwargs['id']:\n abort(403)\n return func(**kwargs)\n\n return decorated",
"def might_need_auth(f):\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n\n if username is None:\n sys.exit(\"Please set a username (run `osf -h` for details).\")\n else:\n sys.exit(\"You are not authorized to access this project.\")\n\n return return_value\n\n return wrapper",
"def admin_required(func):\n\n @functools.wraps(func)\n def __wrapper(request, *args, **kwds):\n \"\"\"Makes it possible for admin_required to be used as a decorator.\"\"\"\n if request.user_is_admin:\n return func(request, *args, **kwds) # pylint: disable-msg=W0142\n else:\n return utility.forbidden(\n request,\n error_message='You must be an administrator to view this page.')\n\n return __wrapper",
"def admin_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n if request.user != config['server']['admin_username']:\n return web.json_response({'status': 'error', 'message': 'admin rights required'}, status=403)\n return func(request)\n return wrapper",
"def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n identity = get_jwt_identity()\n if identity['role'] != 'admin':\n return jsonify({'message': 'Permission denied'}), 403\n else:\n return fn(*args, **kwargs)\n\n return wrapper",
"def admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == False:\n return jsonify({\"messsage\": \"Only admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper",
"def unprotected_method():\n return {\"message\": \"Anyone access this function\"}",
"def sudo():\n try:\n run('sudo whoami')\n return 'sudo'\n except:\n return ''",
"def superuser_only(view_func):\n def _inner(request, *args, **kwargs):\n if not request.user.is_superuser:\n raise PermissionDenied\n return view_func(request, *args, **kwargs)\n return _inner",
"def allow_remote_invocation(func, method='auto'):\r\n setattr(func, 'allow_rmi', method)\r\n return func",
"def octp_require_admin(f):\n\n @functools.wraps(f)\n def authed_only_wrapper(*args, **kwargs):\n if is_admin():\n return f(*args, **kwargs)\n else:\n if request.content_type == 'application/json':\n abort(403)\n else:\n # return redirect(url_for('auth.login', next=request.full_path))\n return render_template('page.html', content=\"You need to be admin to access this page!\")\n\n return authed_only_wrapper"
]
| [
"0.7411366",
"0.6413903",
"0.63163996",
"0.6282777",
"0.6280451",
"0.6224123",
"0.6188734",
"0.6127398",
"0.6112499",
"0.6107867",
"0.6105563",
"0.6062908",
"0.6052526",
"0.6051395",
"0.6020828",
"0.5980072",
"0.59735245",
"0.5961545",
"0.595655",
"0.5924637",
"0.59125936",
"0.5893509",
"0.58765537",
"0.5868495",
"0.5830307",
"0.58247894",
"0.5823131",
"0.58224005",
"0.5806723",
"0.57961553"
]
| 0.78824574 | 0 |
Create an instance of a map controller based on the regex match object | def from_match(match):
clat = float(match.group(1))
clng = float(match.group(2))
if match.group(3):
mlat = float(match.group(3))
mlng = float(match.group(4))
else:
mlat = mlng = None
zoom = float(match.group(5))
return MapController(clat, clng, zoom, mlat, mlng) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_re_match(cls, match):\n kwargs = match.groupdict()\n location = kwargs['location'].split()\n kwargs['location'] = (int(location[0]), int(location[1]),\n int(location[2]))\n return cls(**kwargs)",
"def from_re_match(cls, match):\n kwargs = match.groupdict()\n player_location = kwargs['player_location'].split()\n kwargs['player_location'] = (int(player_location[0]),\n int(player_location[1]),\n int(player_location[2]))\n target_location = kwargs['target_location'].split()\n kwargs['target_location'] = (int(target_location[0]),\n int(target_location[1]),\n int(target_location[2]))\n return cls(**kwargs)",
"def from_re_match(cls, match):\n kwargs = match.groupdict()\n player_location = kwargs['player_location'].split()\n kwargs['player_location'] = (int(player_location[0]),\n int(player_location[1]),\n int(player_location[2]))\n target_location = kwargs['target_location'].split()\n kwargs['target_location'] = (int(target_location[0]),\n int(target_location[1]),\n int(target_location[2]))\n if match.string.endswith('(headshot)'):\n kwargs['headshot'] = True\n return cls(**kwargs)",
"def __init__(self, regex, view):\n self.regex = re.compile(regex)\n self.view = view",
"def __init__(self):\n self.match_views = MatchView.MatchView()",
"def make_map():\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n \n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n\n map.connect('/', controller='list', action='page', id=1)\n map.connect('/list/{action}_{id}', controller='list', requirements={'id': r'\\d+'})\n map.connect('/{controller}', action='index')\n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}', requirements={'id': r'\\d+'})\n\n return map",
"def make_map():\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'],explicit=True)\n map.minimization = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n \n map.connect('signout', '/signout', controller='account', action='signout')\n map.connect('signin', '/signin', controller='account', action='signin')\n map.connect('signinagain', '/signinagain', controller='account', action='signinagain')\n\n map.connect(\n '/page/{pageid}/{controller}/{action}',\n requirements=dict(pageid='\\d+')\n )\n map.connect(\n '/page/{pageid}/{controller}/{action}/{id}',\n requirements=dict(pageid='\\d+',id='\\d+')\n )\n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n map.connect('path', '*url', conditions={'function':parse}, _filter=build)\n\n return map",
"def __init__(self, match_re: Pattern[str], annotation_type: Annotation, window_type: Annotation=None):\n self.match_re = match_re\n self.annotation_type = annotation_type\n self.window_type = window_type",
"def route(self, regex, callback):\n self.__matcher.register(regex, callback)",
"def testRegexMapping(self):\n factory = service_handlers.ServiceHandlerFactory(Service)\n path, mapped_factory = factory.mapping('.*/my_service')\n\n self.assertEquals(r'(.*/my_service)' + service_handlers._METHOD_PATTERN, path)\n self.assertEquals(id(factory), id(mapped_factory))\n match = re.match(path, '/whatever_preceeds/my_service.my_method')\n self.assertEquals('/whatever_preceeds/my_service', match.group(1))\n self.assertEquals('my_method', match.group(2))\n match = re.match(path, '/something_else/my_service.my_other_method')\n self.assertEquals('/something_else/my_service', match.group(1))\n self.assertEquals('my_other_method', match.group(2))",
"def new():\n return ResearchMap()",
"def __init__(self, mapper):\n self.map = mapper\n self._router = routes.middleware.RoutesMiddleware(self._dispatch,\n self.map)",
"def make_map():\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n\n # Browsing routes\n map.connect('/browse/letters/{id}/page', controller='browse', action='letters')\n map.connect('/browse/letters/{id}/page/', controller='browse', action='letters')\n map.connect('/browse/letters/{id}/page/{page}', controller='browse', action='letters', page=1)\n\n map.connect('/browse/categories/{id}/{name}', controller='browse', action='categories', name=None)\n map.connect('/browse/categories/{id}/{name}/', controller='browse', action='categories', name=None)\n map.connect('/browse/categories/{id}/{name}/page', controller='browse', action='categories', name=None)\n map.connect('/browse/categories/{id}/{name}/page/', controller='browse', action='categories', name=None)\n map.connect('/browse/categories/{id}/{name}/page/{page}', controller='browse', action='categories', page=1)\n\n map.connect('/browse/new', controller='browse', action='new', page=1)\n map.connect('/browse/new/', controller='browse', action='new', page=1)\n map.connect('/browse/new/page', controller='browse', action='new', page=1)\n map.connect('/browse/new/page/', controller='browse', action='new', page=1)\n map.connect('/browse/new/page/{page}', controller='browse', action='new', page=1)\n\n map.connect('/browse/popular', controller='browse', action='popular', page=1)\n map.connect('/browse/popular/', controller='browse', action='popular', page=1)\n map.connect('/browse/popular/page', controller='browse', action='popular', page=1)\n map.connect('/browse/popular/page/', controller='browse', action='popular', page=1)\n map.connect('/browse/popular/page/{page}', controller='browse', action='popular', page=1)\n\n # Search route\n map.connect('/search', controller='search', action='search')\n map.connect('/search/', controller='search', action='search')\n map.connect('search', '/search')\n\n # Default routes\n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n map.connect('/{controller}/{action}/{id}/')\n\n return map",
"def make_map(config):\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n map.explicit = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n \n map.connect('/', controller='index',action='index')\t \n map.connect('/monitor/{chart}/{points}/{endtime}', controller='monitor',action='graph')\n map.connect('/monitor/{chart}/{points}', controller='monitor',action='graph')\n map.connect('/overview/{catagory}/{points}', controller='overview',action='graph')\n map.connect('/warning_report', controller='warning_report',action='graph')\n map.connect('/zoom/{dbname}/{datasource}/{resolution}/{title}/{points}/{limit}/{description}', controller='zoom_graph',action='zoomin')\n\n\n # CUSTOM ROUTES HERE\n\n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n map.connect('/{controller}/{action}/{chart}/{value}')\n #map.connect('/{controller}{action}/{chart}/{type}/{points}')\n map.connect('/{controller}/{action}/{dbname}/{servicename}/{value}')\n\n return map",
"def createMatch(self, __matchName, __matchPassword, __beatmapID, __beatmapName, __beatmapMD5, __gameMode, __hostUserID):\n\t\t# Add a new match to matches list\n\t\tmatchID = self.lastID\n\t\tself.lastID+=1\n\t\tself.matches[matchID] = match.match(matchID, __matchName, __matchPassword, __beatmapID, __beatmapName, __beatmapMD5, __gameMode, __hostUserID)\n\t\treturn matchID",
"def url(regex, view):\n return RegexPattern(regex, view)",
"def setup_route(self):\n self._regex = re.compile(\n f\"^{self._pattern.sub(self.replace_field, self.route)}$\"\n )",
"def make_map(config):\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n\n # Note that we need to specify the view and page if using the entry controller\n # as it is referenced in the __before__ of BaseController\n map.connect('/', controller='entry', action='index', view='latest', page=0)\n map.connect('/Latest', controller='entry', action='index', view='latest', page=0)\n map.connect('/Latest/{page}', controller='entry', action='index', view='latest')\n map.connect('/latest', controller='entry', action='index', view='latest', page=0)\n map.connect('/latest/{page}', controller='entry', action='index', view='latest')\n\n map.connect('/tag/{keyword}', controller='entry', action='tag', view='tag', page=0)\n map.connect('/tag/{keyword}/{page}', controller='entry', action='tag', view='tag')\n\n map.connect('/Download', controller='other', action='download')\n map.connect('/About', controller='other', action='download')\n map.connect('/download', controller='other', action='download')\n map.connect('/about', controller='other', action='download')\n\n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n\n return map",
"def make_map(config):\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n map.explicit = False\n\n mcon = map.connect\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n mcon('/error/{action}', controller='error')\n mcon('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n mcon('/', controller='home', action='main')\n mcon('/search/:query', controller='home', action='search', conditions = dict(method=['POST', 'GET']))\n mcon('/games', controller='games', action='all_games')\n mcon('/games/:id', controller='games', action='game_info')\n mcon('/platforms', controller='platforms', action='all_platforms')\n mcon('/platforms/:id', controller='platforms', action='platform_info')\n\n return map",
"def new_map(self):\n self.wizard = NewMap(self)",
"def make_map(config):\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n map.explicit = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n map.connect('/', controller='public', action='index')\n\n map.connect('/redesign', controller='public', action='index2')\n\n map.connect('/stingyideas', controller='idea', action='idea_form', conditions=dict(method=[\"GET\"]))\n map.connect('/stingyideas', controller='idea', action='idea_submit', conditions=dict(method=[\"POST\"]))\n\n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n\n\n return map",
"def from_ofp_match(cls, match):\n return cls.from_items(match.items())",
"def __init__(self, pattern, response_dict):\r\n self._pattern = pattern\r\n self._response_dict = response_dict",
"def apply_match(regex_name, match):\n if regex_name is 'name':\n result['name'] = match.group(1)\n elif regex_name is 'single_start':\n result['startnodes'] = [int(match.group(1))]\n elif regex_name is 'multi_start':\n result['startnodes'] = ast.literal_eval(match.group(1))\n elif regex_name is 'groups':\n result['groups'] = ast.literal_eval(\n match.group(1).replace(\" \", \"\"))\n elif regex_name is 'comment':\n result['comment'] += match.group(1) + \"\\n\"\n elif regex_name is 'nodes':\n result['nodes'].append([int(float(match.group(2))),\n int(float(match.group(3)))])",
"def __init__(self, replacer, lookup, field=1, skipper=None):\n if isinstance(replacer, str):\n replacer = re.compile(replacer)\n if isinstance(skipper, str):\n skipper = re.compile(skipper)\n\n if not replacer.groups:\n raise DictReplacerError(\"Invalid replacer pattern: no groups \"\n \"specified - '%s'\" % replacer.pattern)\n\n self._replacer = replacer\n self._lookup = lookup\n self._field = field\n self._skipper = skipper",
"def init_app(self, app):\n\n app.url_map.converters['regex'] = RegexConverter",
"def __init__(self, pattern):\n self._pattern = re.compile(pattern)",
"def make_map(config):\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n map.explicit = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n\n map.connect('/', controller='home', action='index')\n # TODO: how to do this the right way?\n map.connect('/stats', controller='stats', action='index')\n map.connect('/stats/', controller='stats', action='index')\n\n map.connect('/metrics', controller='metrics', action='index')\n map.connect('/metrics/', controller='metrics', action='index')\n\n map.connect('/external', controller='external', action='index')\n map.connect('/external/', controller='external', action='index')\n\n map.connect('/product/consumable', controller='consumable', action='list')\n map.connect('/product/consumable/', controller='consumable', action='list')\n \n map.connect('/product/consumable/{action}', controller='consumable')\n map.connect('/product/consumable/{action}/{id}', controller='consumable')\n\n for method in ('new', 'create', 'edit', 'save', 'delete', 'editplate', 'saveplate',\n 'list', 'plates', 'plate_template', 'plate_template_download', 'plate_upload', 'plate_do_upload', 'csfv_unhook'):\n map.connect('/product/batch/%s' % method, controller='product', action='batch_%s' % method)\n map.connect('/product/batch/%s/{id}' % method, controller='product', action='batch_%s' % method)\n\n for method in ('list','new', 'create', 'edit', 'save', 'delete', 'plates'):\n map.connect('/product/groove/%s' % method, controller='groove')\n map.connect('/product/groove/%s/{id}' % method, controller='groove')\n\n map.connect('/product', controller='product', action='index')\n map.connect('/product/', controller='product', action='index')\n\n map.connect('/metrics/history_csv/{box_code}/{plate_type}', controller='metrics', action='history_csv', reprocess_config_id='')\n map.connect('/metrics/{action}/{id}', controller='metrics', mode='group', reprocess_config_id=None)\n map.connect('/metrics/{action}/{id}/', controller='metrics', mode='group', reprocess_config_id=None)\n map.connect('/metrics/{action}/{id}/{reprocess_config_id}', mode='group', controller='metrics')\n map.connect('/drmetrics/{action}/{id}', controller='metrics', mode='dr', reprocess_config_id='')\n map.connect('/platemetrics/{action}/{id}', controller='metrics', mode='plate', reprocess_config_id='')\n map.connect('/platemetrics/{action}/{id}/{reprocess_config_id}', controller='metrics', mode='plate')\n map.connect('/statsmetrics/{action}/{id}', controller='metrics', mode='plate_non_cert', reprocess_config_id='')\n map.connect('/statsmetrics/{action}/{id}/{reprocess_config_id}', controller='metrics', mode='plate_non_cert')\n map.connect('/metrics/history/{box_code}/{plate_type}/{plate_order}', controller='metrics', action='history', mode='plate', reprocess_config_id='')\n \n #map.connect('/metrics/wells/{id}', controller='metrics', action='wells', reprocess_config_id=None)\n #map.connect('/metrics/wells/{id}/{reprocess_config_id}', controller='metrics', action='wells')\n \n map.connect('/stats/boxes/day/{year}/{month}/{day}', controller='stats', action='boxes_by_day')\n map.connect('/stats/boxes', controller='stats', action='boxes_by_week', weeks_ago=0)\n map.connect('/stats/boxes/week/{weeks_ago}', controller='stats', 
action='boxes_by_week', requirements={'weeks_ago': '\\d+'})\n \n map.connect('/stats/operators/day/{year}/{month}/{day}', controller='stats', action='operators_by_day')\n map.connect('/stats/operators', controller='stats', action='operators_by_week', weeks_ago=0)\n map.connect('/stats/operators/week/{weeks_ago}', controller='stats', action='operators_by_week', requirements={'weeks_ago': '\\d+'})\n\n # TODO clean this up with submappers\n map.connect('/status/readers', controller='admin', action='readers', admin=False)\n map.connect('/admin/readers', controller='admin', action='readers', admin=True)\n map.connect('/status/colorcomp', controller='admin', action='colorcomp', admin=False)\n map.connect('/admin/colorcomp', controller='admin', action='colorcomp', admin=True)\n map.connect('/status/readers/prod', controller='admin', action='prod', admin=False)\n map.connect('/admin/readers/prod', controller='admin', action='prod', admin=True)\n map.connect('/status/modules/prod', controller='admin', action='modules', admin=False)\n map.connect('/admin/modules/prod', controller='admin', action='modules', admin=True)\n map.connect('/status/detectors/prod', controller='admin', action='detectors', admin=False)\n map.connect('/admin/detectors/prod', controller='admin', action='detectors', admin=True)\n map.connect('/status/readers/table', controller='admin', action='reader_status_table', admin=False)\n map.connect('/admin/readers/table', controller='admin', action='reader_status_table', admin=True)\n map.connect('/status/reader_history/{id}', controller='admin', action='reader_history', admin=False)\n map.connect('/admin/reader_history/{id}', controller='admin', action='reader_history', admin=True)\n map.connect('/status/reader_summary/{id}', controller='admin', action='reader_summary', admin=False)\n map.connect('/admin/reader_summary/{id}', controller='admin', action='reader_summary', admin=True)\n\n # manual. 
whoo\n map.connect('/sample/cnv/new', controller='sample', action=\"cnv_new\")\n map.connect('/sample/cnv/create', controller='sample', action=\"cnv_create\")\n map.connect('/sample/cnv/edit', controller='sample', action=\"cnv_edit\")\n map.connect('/sample/cnv/save', controller='sample', action=\"cnv_save\")\n map.connect('/sample/cnv/saved', controller='sample', action=\"cnv_saved\")\n map.connect('/sample/cnv/delete', controller='sample', action=\"cnv_delete\")\n\n # manual again, whoo -- go on scaffold?\n map.connect('/assay/enzyme/new', controller='assay', action='enzyme_conc_new')\n map.connect('/assay/enzyme/create', controller='assay', action='enzyme_conc_create')\n map.connect('/assay/enzyme/edit', controller='assay', action='enzyme_conc_edit')\n map.connect('/assay/enzyme/save', controller='assay', action='enzyme_conc_save')\n map.connect('/assay/enzyme/delete', controller='assay', action='enzyme_conc_delete')\n\n map.connect('/nplot/dg/runtime/bydate/{dg_id}', controller='nplot', action='dg_trend', prop='vacuum', view='time')\n map.connect('/nplot/dg/runtime/byrun/{dg_id}', controller='nplot', action='dg_trend', prop='vacuum', view='runs')\n map.connect('/nplot/dg/pressure/bydate/{dg_id}', controller='nplot', action='dg_trend', prop='pressure', view='time')\n map.connect('/nplot/dg/pressure/byrun/{dg_id}', controller='nplot', action='dg_trend', prop='pressure', view='runs')\n map.connect('/nplot/dg/spike/bydate/{dg_id}', controller='nplot', action='dg_trend', prop='spike', view='time')\n map.connect('/nplot/dg/spike/byrun/{dg_id}', controller='nplot', action='dg_trend', prop='spike', view='runs')\n\n map.connect('/plate/well_attr_csv/{id}/{attribute}', controller='plate', action='well_attr_csv')\n map.connect('/plate/well_metric_csv/{id}/{attribute}', controller='plate', action='well_metric_csv')\n map.connect('/plate/channel_attr_csv/{id}/{attribute}', controller='plate', action='channel_attr_csv')\n map.connect('/plate/channel_metric_csv/{id}/{attribute}', controller='plate', action='channel_metric_csv')\n map.connect('/plate/list/{id}', controller='plate', action='list_box2')\n\n map.connect('/well/gated/{id}', controller='well', action='threshold', show_only_gated=False)\n map.connect('/well/accepted_peak_csv/{id}', controller='well', action='peak_csv', show_only_gated=True)\n map.connect('/well/all_peak_csv/{id}', controller='well', action='peak_csv', show_only_gated=False)\n map.connect('/well/amphist/{id}/{channel_num}', controller='well', action='amphist')\n map.connect('/well/amptime/{id}/{channel_num}', controller='well', action='amptime')\n map.connect('/well/conc_trend/{id}/{channel_num}', controller='well', action='conc_trend')\n map.connect('/well/temporal_galaxy/{id}/{channel_num}', controller='well', action='temporal_galaxy')\n map.connect('/well/air_plot/{id}/{channel_num}', controller='well', action='air_plot')\n map.connect('/well/air_hist/{id}/{channel_num}', controller='well', action='air_hist')\n\n # TODO still seems kludgy\n map.connect('/setup', controller='setup', action='index')\n map.connect('/setup/', controller='setup', action='index')\n map.connect('/beta/{action}', controller='setup', beta=True)\n map.connect('/beta/{action}/{id}', controller='setup', beta=True)\n map.connect('/setup/{action}', controller='setup', beta=False)\n map.connect('/setup/{action}/{id}', controller='setup', beta=False)\n\n map.connect('/assay/new/{action}', controller='sequence', flow='sequence.new')\n map.connect('/assay/edit/{action}', controller='sequence', 
flow='sequence.edit')\n map.connect('/assay/approved', controller='sequence', action='approved_list')\n map.connect('/assay/group', controller='assay_group', action='list')\n map.connect('/assay/group/{action}', controller='assay_group')\n map.connect('/assay/view/{id}', controller='sequence', action='view_details')\n map.connect('/assay/view/{id}/sequences', controller='sequence', action='view_sequences')\n map.connect('/assay/view/{id}/plates', controller='sequence', action='view_plates')\n map.connect('/assay/view/{id}/validation', controller='sequence', action='view_validation')\n map.connect('/assay/{action}', controller='sequence')\n map.connect('/assay/{action}/{id}', controller='sequence')\n \n map.connect('/{controller}/{action}')\n map.connect('/{controller}/{action}/{id}')\n\n return map",
"def new_map(self):\n self.map = Map()\n self.player.roomId = 0\n return self.map",
"def make_map(config):\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n map.explicit = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n\n # Map the /admin url to FA's AdminController\n # Index page\n map.connect('admin', '/', controller='admin', action='models')\n # Models\n map.resource('model', 'models', path_prefix='/{model_name}', controller='admin')\n\n return map"
]
| [
"0.642154",
"0.62663186",
"0.613197",
"0.6051899",
"0.58634245",
"0.5823124",
"0.5723135",
"0.5703356",
"0.5665067",
"0.564505",
"0.557083",
"0.5539005",
"0.54985327",
"0.54926795",
"0.54654515",
"0.54522747",
"0.5433033",
"0.5412259",
"0.53442377",
"0.5310062",
"0.53073347",
"0.52895194",
"0.52788144",
"0.5260251",
"0.5182743",
"0.51780367",
"0.5152239",
"0.5135581",
"0.51233625",
"0.51209885"
]
| 0.7379016 | 0 |
Returns the world map image. This method caches the result the first time it is called. | def get_world_image(cls):
if cls.map_image is None:
with open(str(Path(RES_PATH, cls.map_path)), 'rb') as infile:
cls.map_image = Image.open(infile).convert('RGBA')
return cls.map_image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def generate_snapshot(self, include_world=True):\n if self.has_marker:\n y, x = -self.mlat, self.mlng\n else:\n y, x = -self.clat, self.clng\n zoom = self.zoom\n world = MapController.get_world_image()\n top, bottom = y - 256/(2**zoom), y + 256/(2**zoom)\n left, right = x - 256/(2**zoom), x + 256/(2**zoom)\n logging.info(f'Cropping world at {left} {top} {right} {bottom}')\n snapshot = world.crop((left, top, right, bottom))\n if top - bottom <= 256:\n snapshot = snapshot.resize((256, 256), resample=Image.NEAREST)\n else:\n snapshot = snapshot.resize((256, 256), resample=Image.BILINEAR)\n\n if self.has_marker:\n marker = MapController.get_marker_image()\n snapshot.paste(marker, (112, 96), marker.getchannel('A'))\n\n if include_world:\n # Expand the canvas and put the world map under the inset\n result = Image.new('RGBA', (256, 400))\n result.paste(snapshot)\n result.paste(world.resize((256, 144)), (0, 256))\n\n # Draw a marker at the same place at the world map\n if self.has_marker:\n result.paste(marker, (int(x//32)-16, int(y//32)-32+256), marker.getchannel('A'))\n\n # Draw an overlay indicating the inset on the world map\n overlay = Image.new('RGBA', (256, 400))\n draw = ImageDraw.Draw(overlay)\n fill_color = (255, 255, 0, 64) # Transparent yellow\n outline_color = (255, 255, 0, 96) # More solid yellow\n draw.rectangle((max(0, int(left//32)),\n max(0, int(top//32))+256,\n min(256, int(right//32)),\n min(144, int(bottom//32))+256),\n fill=fill_color,\n outline=outline_color,\n width=1)\n draw.line((0, 256, max(0, int(left//32)), min(144, int(bottom//32))+256), (255, 255, 0, 96), 1)\n draw.line((256, 256, min(256, int(right//32)), min(144, int(bottom//32))+256), (255, 255, 0, 96), 1)\n result = Image.alpha_composite(result, overlay)\n else:\n result = snapshot\n \n output = BytesIO()\n result.save(output, format='png')\n output.seek(0)\n return output",
"def toworld(self, *args, **kwargs):\n return _image.image_toworld(self, *args, **kwargs)",
"def get_map(self):\n return self.get_raw_ys()",
"def build_map(self):\n # Initialize the world map\n self.world_map = np.zeros((self.map_size, self.map_size))\n \n # Subscribe data and process them in the callback func\n sonar_sub = message_filters.Subscriber('/RosAria/sonar', PointCloud)\n pose_sub = message_filters.Subscriber('/RosAria/pose', Odometry)\n\n time_sync = message_filters.TimeSynchronizer([sonar_sub, pose_sub], queue_size=10)\n time_sync.registerCallback(self.callback_map)\n \n # show map interactively\n rospy.sleep(1)\n while not rospy.is_shutdown():\n cv2.imshow('world_map', self.world_prob)\n cv2.waitKey(100)\n\n if self.save_map and self.count%1000==0:\n with open(self.map_file, 'w') as f:\n pickle.dump(self.world_prob, f)\n print(\"=== Save map to {} ===\".format(self.map_file))",
"def world(cls):\n try:\n return cls._world_data\n except AttributeError:\n cls.create_the_world()\n return cls._world_data",
"def get_map(self, name, return_type='image'):\n m = self.maps.get(name)\n if m is None:\n raise ValueError(\"No map with name '{}' found.\".format(name))\n return self.masker.inverse_transform(m) if return_type == 'image' else m",
"def d3_world_map():\n\n return render_template(\"world.html\")",
"def get_map(self):\n return self.map",
"def entire_map(cls, world):\n return cls(0, world.xdim, 0, world.ydim)",
"def map( self ) :\n\n self.readMap( )\n\n return( self.__map )",
"def _load_img(self, name):\n try:\n img_path = os.path.join(global_var.PATH, \"maps\", name + \".png\")\n env_img = pygame.image.load(img_path)\n except Exception as e:\n print(e)\n print(\"Environment\", name, \"does not exist. Make sure that a PNG image exists\",\n \"under that name in the \\\"maps\\\" folder.\")\n sys.exit()\n\n return env_img",
"def draw(self):\n if not self._folium_map:\n self._set_folium_map()\n return self._folium_map",
"def getGoogleMap(self, lat, lng, wTileN, hTileN, zoom):\n start_x, start_y = self.getStartTlXY(lat, lng, zoom)\n width, height = 256 * wTileN, 256 * hTileN\n map_img = Image.new('RGB', (width, height))\n for x in range(0, wTileN):\n for y in range(0, hTileN):\n url = 'https://mt0.google.com/vt?x=' + \\\n str(start_x+x)+'&y='+str(start_y+y)+'&z='+str(zoom)\n current_tile = str(x)+'-'+str(y)\n urllib.request.urlretrieve(url, current_tile)\n map_img.paste(Image.open(current_tile), (x*256, y*256))\n os.remove(current_tile)\n return map_img",
"def world(self):\n return self.worlds.get(self.world_index)",
"def get_map(self):\n return self._locmap",
"def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255",
"def get_test_floor_map_image(self):\n return SimpleUploadedFile(\n \"test_image.png\",\n content=open(\n \"{}/maps/wing_l.png\".format(MEDIA_ROOT), 'rb').read(),\n content_type='image/png')",
"def test_render_world_map():\n gdpinfo = gdpinfo_dict()\n codeinfo = codeinfo_dict()\n pygal_countries = pygal_country_dict()\n\n # 1960\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1960\", \"isp_gdp_world_code_1960.svg\")\n\n # 1980\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1980\", \"isp_gdp_world_code_1980.svg\")\n\n # 2000\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2000\", \"isp_gdp_world_code_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2010\", \"isp_gdp_world_code_2010.svg\")",
"def map_data(cult):\n try: # Map already exists\n underworld_model = Underworld.objects.get(owner=cult)\n field = generate_map(underworld_model.seed)\n except Underworld.DoesNotExist: # Generate new map\n # Create a new random seed every time we create an Underworld map\n seed = ''.join(random.choice(ascii_letters + digits) for _ in range(32))\n field = generate_map(seed)\n underworld_model = Underworld(owner=cult, seed=seed, x=field['x'], y=field['y'], time=0)\n underworld_model.save()\n\n print('### ########################### Seed used: ' + underworld_model.seed)\n \n return underworld_model, field",
"def test_render_world_map():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n codeinfo = {\n \"codefile\": \"isp_country_codes.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"plot_codes\": \"ISO3166-1-Alpha-2\",\n \"data_codes\": \"ISO3166-1-Alpha-3\"\n }\n\n # Get pygal country code map\n pygal_countries = pygal.maps.world.COUNTRIES\n\n # 1960\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1960\", \"isp_gdp_world_code_1960.svg\")\n\n # 1980\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1980\", \"isp_gdp_world_code_1980.svg\")\n\n # 2000\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2000\", \"isp_gdp_world_code_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2010\", \"isp_gdp_world_code_2010.svg\")",
"def test_render_world_map():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n codeinfo = {\n \"codefile\": \"isp_country_codes.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"plot_codes\": \"ISO3166-1-Alpha-2\",\n \"data_codes\": \"ISO3166-1-Alpha-3\"\n }\n\n # Get pygal country code map\n pygal_countries = pygal.maps.world.COUNTRIES\n\n # 1960\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1960\", \"isp_gdp_world_code_1960.svg\")\n\n # 1980\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"1980\", \"isp_gdp_world_code_1980.svg\")\n\n # 2000\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2000\", \"isp_gdp_world_code_2000.svg\")\n\n # 2010\n render_world_map(gdpinfo, codeinfo, pygal_countries, \"2010\", \"isp_gdp_world_code_2010.svg\")",
"def getWorld(self):\n return self.world",
"def getImage(self, point):\n if self.map[point.y,point.x] != None:\n return self.map[point.y,point.x].getItemImage()",
"def save_world(self):\n pass",
"def map_image(res):\n # constants\n MAP_URL = \"https://maps.googleapis.com/maps/api/staticmap\"\n SIZE = \"400x400\"\n\n polygon_path = mh.get_polygon_path(res)\n origin = mh.get_latlon(mh.get_origin(res))\n destination = mh.get_latlon(mh.get_destination(res))\n params = {\n \"size\": SIZE,\n \"path\": f\"enc:{polygon_path}\",\n \"markers\": [f\"color:red|label:X|{destination}\", f\"size:small|color:blue|{origin}\"],\n \"key\": key\n }\n img_resp = requests.get(url=MAP_URL, params=params)\n return img_resp.url",
"def render_map(self):\n # first we create a blank image, on which we will draw the base map\n width = self.image_size[0]\n height = self.image_size[1]\n # ex: size of the image 1080 height, 1920 width, 3 channels of colour\n base_map = np.zeros((height, width, 3), np.uint8)\n base_map[:, :] = self.background_color\n\n # we draw each shape of the dictionary on the blank image\n for shape_id in self.shape_dict_filt:\n shape = self.shape_dict_filt[shape_id]\n points = shape.points\n pts = np.array(points, np.int32)\n cv2.polylines(base_map, [pts], True, shape.color_line,\n shape.line_thick, cv2.LINE_AA)\n\n self.map_file = base_map",
"def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) (ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map",
"def small_map(self):\n self.map_url = \"https://maps.googleapis.com/maps/api/staticmap?center={},{}&zoom=12&size=350x350&key={}\".format(self.lat, self.lng, api_key) \n return (self.map_url)",
"def readMap(self):\n MapImage = Image.open(self.MapImageFile).convert(\"RGB\")\n MapImageWidth, MapImageHeight = MapImage.size\n self.Width, self.Height = MapImageWidth // self.TileWidth, MapImageHeight // self.TileHeight\n progress = -1\n\n for y in range(self.Height):\n for x in range(self.Width):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = MapImage.crop(box)\n self.List.append(self.TileSet.findTile(tile))\n\n # Calculate the progress, and print it to the screen.\n p = ((x + y * self.Width) * 100) / (self.Width * self.Height)\n if progress != p:\n progress = p\n self.printProgress(progress)\n\n self.printProgress(100)",
"def new_map(self):\n self.map = Map()\n self.player.roomId = 0\n return self.map"
]
| [
"0.66868144",
"0.6528561",
"0.64764446",
"0.6316041",
"0.6301161",
"0.6254141",
"0.62515",
"0.6232758",
"0.6192258",
"0.6036638",
"0.6019334",
"0.60071325",
"0.5981669",
"0.5949518",
"0.5942885",
"0.58881766",
"0.5847239",
"0.5834663",
"0.5819538",
"0.57475656",
"0.57475656",
"0.5721719",
"0.570434",
"0.56902343",
"0.5688089",
"0.568564",
"0.5672967",
"0.56690043",
"0.5633946",
"0.5611906"
]
| 0.84329826 | 0 |
Returns the marker image. This method caches the result the first time it is called. | def get_marker_image(cls):
if cls.marker_image is None:
with open(str(Path(RES_PATH, cls.marker_path)), 'rb') as infile:
cls.marker_image = Image.open(infile).convert('RGBA').resize((32, 32))
return cls.marker_image | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getImage(self, point):\n if self.map[point.y,point.x] != None:\n return self.map[point.y,point.x].getItemImage()",
"def get_image(self):\n if not hasattr(self, '_BasePublication__image_cache'):\n images = self.get_images()\n self.__image_cache = images[0].picture if images else None\n return self.__image_cache",
"def get_image(self):\n self.drawer.flush()\n return self.img",
"def get_image(self):\n return self.image",
"def get_image(self):\n return self.image",
"def get_image(self):\n return self.image",
"def get_image(self, address):\r\n # Do a bit of caching\r\n if self.last_image and self.last_image.contains(address):\r\n return self.last_image\r\n \r\n # if it was not cached, traverse all of the loaded images\r\n for image in self.loaded_images:\r\n if image.contains(address):\r\n self.last_image = image\r\n return image\r\n \r\n return None",
"def get_current_image(self):\n raise NotImplementedError",
"def getimage(self):",
"def get_image(self):\n return self.process_raw_image(self.get_raw_image())",
"def get_image(self):\n image = None\n if self.image_path:\n image=ImageTk.PhotoImage(ImageOps.fit(\n Image.open(resolve_path(self.image_path)),self.size or (32,32)))\n self._hack.append(image)\n\n return image",
"def image(self):\n return self._image",
"def get_new_image(self):\n return self.vid_mem_reader.get_latest_image()[0]",
"def find_image_url(lat_value, long_value):\n global custom_memory, custom_hit, custom_miss, total_custom_memory\n image_tuple = (lat_value, long_value)\n \n #When Latitude Longitude in Cache and HIT\n if image_tuple in custom_memory:\n custom_hit+=1\n custom_memory[image_tuple][1] = datetime.now()\n return custom_memory[image_tuple][0],\"hit\"\n \n #When Latitude Longitude NOT in Cache and MISS\n if len(custom_memory) < total_custom_memory:\n custom_miss+=1\n custom_memory[image_tuple] = [GetImageURL(*image_tuple), datetime.now()]\n return custom_memory[image_tuple][0], \"miss_when_not_full\"\n else:\n custom_memory = sorted([(key, list_vals) for key, list_vals in custom_memory.items()], key=lambda i:i[1][1], reverse=False)\n del custom_memory[0]\n custom_memory = dict(custom_memory)\n custom_miss+=1\n custom_memory[image_tuple] = [GetImageURL(*image_tuple), datetime.now()]\n return custom_memory[image_tuple][0], \"miss_when_after_full\"",
"def get_image(self):\n return self.camera.getImage()",
"def get_local_image(self, src):\r\n local_image = ImageUtils.store_image(None,\r\n self.link_hash, src, self.config)\r\n return local_image",
"def get_image ( self, object ):\n return self.image",
"def get_image(self):\n image = Image.new('1', (8, 16))\n draw = ImageDraw.Draw(image)\n for x in xrange(8):\n for y in xrange(16):\n draw.point((x,y),self.get_pixel(x, y))\n return image",
"def getImage( self ):\n return self.__image;",
"def cache(self):\n\t\tprint self.url\n\t\tif self.url and not self.streetimage:\n\t\t\tresult = urllib.urlretrieve(self.url)\n\t\t\tfname = os.path.basename(self.url).split('&')[-1]+\".jpg\"\n\t\t\tprint 'fname = ', fname, 'result = ', result\n\t\t\tself.streetimage.save(fname, File(open(result[0])))\n\t\t\tself.save()",
"def get_image():\n return models.Image.objects.all()[0]",
"def GetImage(self):\r\n\r\n return self._image",
"def get_image(self, country: str) -> PngImageFile:\n url = self.__url_dict[country]\n if self.__high_res:\n url = self.__get_high_res_url(country)\n\n file_path = f\"flag_cache/{self.__res_str}/{country}.png\"\n try:\n return Image.open(file_path)\n except IOError:\n print(f\"> Getting Flag of {country}: {url}\")\n return self.get_image_from_url(url, file_path)",
"def get_hit_marker(self):\r\n return Marker((0, 0, 0), self._screen)",
"def image(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"image\")",
"def get_marker(annotation, marker_duration, video_clip):\n position = annotation[\"position\"]\n marker = marker_image.copy()\n marker = marker.set_position(get_marker_absolute_pos(position, video_clip))\n marker = marker.set_start(float(annotation[\"time\"]) / 1000.0)\n marker = marker.set_duration(marker_duration)\n\n return marker",
"def get_image():\n\n # Access the global variable and activate the saving for the last camera's\n # frame\n global _save_image\n _save_image = True",
"def image(self):\n if self.hasImage():\n return self._image.pixmap().toImage()\n return None",
"def image(self, where):\n cook = cookie()\n I = Image(cook, self)\n self.call('image', cook, where)\n print(\"IMAGE\", where)\n return I",
"def image(self):\n if self.hasImage():\n return self._pixmapHandle.pixmap().toImage()\n return None"
]
| [
"0.6773873",
"0.6567225",
"0.6480415",
"0.63177365",
"0.63177365",
"0.63177365",
"0.63121176",
"0.6306062",
"0.6303342",
"0.6243714",
"0.61313015",
"0.6050443",
"0.6037759",
"0.60189766",
"0.6010048",
"0.59817874",
"0.59225345",
"0.59153646",
"0.591476",
"0.5906588",
"0.588026",
"0.5878646",
"0.58780485",
"0.5877627",
"0.5869242",
"0.5866321",
"0.58338165",
"0.5824229",
"0.5817006",
"0.58154714"
]
| 0.788136 | 0 |
Sleep until the specified datetime | async def wait_until(dt):
now = datetime.now()
await sleep((dt - now).total_seconds()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sleep_until(self, time):\n raise NotImplementedError()",
"def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)",
"def sleep_until(hour, minute=0):\n import datetime\n from time import sleep\n\n now = datetime.datetime.now()\n to = (now + datetime.timedelta(days=1)).replace(hour=hour, minute=minute, second=0)\n duration = (to - now).seconds\n sleep(duration)",
"def sleep_for(self, duration):\n raise NotImplementedError()",
"def sleep(self):\n # Just spending cycles of sleep till next date\n timeTarget = self.startTime + timedelta(hours=int(self.newsFrequency))\n while datetime.now() < timeTarget:\n # sleep for 30 min\n # TODO move time to sleep into config\n logging.info(f\"Sleep for 30 min target to wakeup {timeTarget}\")\n time.sleep(60*30)",
"async def sleep(cls, delay: float) -> None:",
"def wake_till(seconds):\n while True:\n if int(time.time()) < seconds:\n time.sleep(5)\n else:\n return",
"def sleep(interval):\n time.sleep(interval) # pragma: no cover",
"def sleep(sleep_time=0.250):\n time.sleep(sleep_time)",
"def wait(self, sleep_time):\n time.sleep(sleep_time)",
"async def sleep(self, sleep_time):\n await asyncio.sleep(sleep_time)",
"def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'",
"def sleep(self):\n current_time = time.time()\n\n if not self.next_time: # first call\n self.next_time = current_time + self.period\n return\n\n delta = self.next_time - current_time\n if delta > 0:\n time.sleep(delta)\n self.next_time += self.period",
"def waitUntil(t: Time_t) -> bool:\n now = dt.datetime.now(t.tzinfo)\n secs = (_fillDate(t) - now).total_seconds()\n run(asyncio.sleep(secs))\n return True",
"def sleep(seconds):\r\n time.sleep(seconds)",
"def sleep(seconds):\n time.sleep(seconds)",
"def sleep(seconds):\n time.sleep(seconds)",
"def simulate_until(self, stop_time, dt=None):\n while self.t < stop_time:\n self.step(dt)",
"def deepsleep(time_ms: int = None) -> None:",
"def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))",
"def _sleep(self, sleep_time: float = 10) -> None:\n sleep_until_interrupt(sleep_time, lambda: self.stopped, interval=0.5)",
"def pause_until(time):\n end = time\n\n # Convert datetime to unix timestamp and adjust for locality\n if isinstance(time, datetime):\n # If we're on Python 3 and the user specified a timezone, \n # convert to UTC and get tje timestamp.\n if sys.version_info[0] >= 3 and time.tzinfo:\n end = time.astimezone(timezone.utc).timestamp()\n else:\n zoneDiff = pytime.time() - (datetime.now() - datetime(1970, 1, 1)).total_seconds()\n end = (time - datetime(1970, 1, 1)).total_seconds() + zoneDiff\n\n # Type check\n if not isinstance(end, (int, float)):\n raise Exception('The time parameter is not a number or datetime object')\n\n # Now we wait\n while True:\n now = pytime.time()\n diff = end - now\n\n #\n # Time is up!\n #\n if diff <= 0:\n break\n else:\n # 'logarithmic' sleeping to minimize loop iterations\n sleep(diff / 2)",
"def sleep(sleep_period):\r\n\r\n # Get current time\r\n now = datetime.datetime.now()\r\n # Set sleep time for no game today\r\n if \"day\" in sleep_period:\r\n delta = datetime.timedelta(hours=12)\r\n # Set sleep time for not in season\r\n elif \"season\" in sleep_period:\r\n # If in August, 31 days else 30\r\n if now.month is 8:\r\n delta = datetime.timedelta(days=31)\r\n else:\r\n delta = datetime.timedelta(days=30)\r\n next_day = datetime.datetime.today() + delta\r\n next_day = next_day.replace(hour=12, minute=10)\r\n sleep = next_day - now\r\n sleep = sleep.total_seconds()\r\n time.sleep(sleep)",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass",
"def user_wait(self, duration):\n self.enqueue(lambda t: sleep(duration + int(PY3)))",
"def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()",
"async def sleep(self, seconds):\n await self._sleep_until_nanos(_get_future_nanos(seconds))"
]
| [
"0.7860681",
"0.75957644",
"0.7344111",
"0.7316204",
"0.71682894",
"0.71541137",
"0.71498096",
"0.7025248",
"0.69865376",
"0.6975623",
"0.69485664",
"0.6945424",
"0.68780625",
"0.68588567",
"0.6832199",
"0.6821095",
"0.6821095",
"0.68148077",
"0.68145597",
"0.67563605",
"0.67473483",
"0.6742897",
"0.6706049",
"0.66871667",
"0.66871667",
"0.66871667",
"0.6671936",
"0.6617034",
"0.6614146",
"0.6589579"
]
| 0.803321 | 0 |
Schedule sending the status of the bot to author's DM | async def schedule_status():
while True:
if controller.scheduled_status_date is not None:
return
controller.scheduled_status_date = datetime.now()+timedelta(hours=23)
await wait_until(controller.scheduled_status_date)
channel = await client.fetch_channel(Guard.AUTHOR_DM)
await channel.send(**{
'content': controller.get_status(),
})
controller.scheduled_status_date = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def status(self, msg, *args):\n content = self.get_status()\n await msg.channel.send(**{\n 'content': content,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"async def schedule_activity():\n if controller.scheduled_activity_date is not None:\n return\n controller.scheduled_activity_date = datetime.now()+timedelta(seconds=30)\n await wait_until(controller.scheduled_activity_date)\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='for ~command'))",
"def send_reminder(self):\n pass",
"async def __send_alarm(self, context: ContextTypes.DEFAULT_TYPE) -> None:\n if self.door_status.update_status():\n await context.bot.send_message(\n MESKOID,\n text=f\"🐙{self.door_status.last_line}\",\n )\n await context.bot.send_message(\n QKZKID,\n text=f\"🐙{self.door_status.last_line}\",\n )\n elif self.__verbose:\n await context.bot.send_message(\n context.job.chat_id,\n text=f\"🚀unedited - {self.door_status.last_edit}.\",\n )",
"async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)",
"async def _uptime(self, ctx):\n async with ctx.typing():\n uptimestr = botuptime(ctx.bot)\n e = discord.Embed(description=f\"{ctx.bot.user.mention} has been online for ```{uptimestr}.```\", color=discord.Color.green())\n e.set_author(name=\"Uptime\", icon_url=ctx.bot.user.avatar_url)\n e.set_footer(text=f\"summoned by {ctx.author}\", icon_url=ctx.author.avatar_url)\n await ctx.send(embed=e)",
"def send_robot_status(self, robot_status):\n self.robot_status_sender.send(robot_status)",
"async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)",
"async def tick(self):\n room = self.bot.Room.load('19961884194@chatroom')\n await room.ready()\n await room.say(f'i love you -> {datetime.now()}')",
"async def autodraw(self, ctx):\n self.initialize(ctx.message.channel.id)\n if self.settings['latest'][ctx.message.channel.id] == 0:\n return await self.bot.send_cmd_help(ctx)\n if ctx.message.author.id in self.users:\n return await self.bot.say(\"You already have a drawing in progress\")\n if ctx.message.channel.id in self.queues:\n return await self.bot.say(\"That channel has a drawing in progress\")\n\n a = dt.strptime(str(self.settings['latest'][ctx.message.channel.id]),\n \"%Y%m%d%H%M\")\n b = ctx.message.timestamp\n\n await self.mkqueue(a, b, ctx.message.channel)\n self.users.append(ctx.message.author.id)\n await self.validate(ctx.message.channel, ctx.message.author)",
"async def tweepy_on_status(self, tweet):\n self.processed_tweets += 1\n if self.skip_tweet(tweet):\n return\n\n chan_conf = dutils.get(self.conf.follows, id=tweet.author.id_str)\n try:\n embed = await self.prepare_embed(tweet)\n content = None\n except:\n embed = None\n content = 'Failed to prepare embed for ' + tweet.tweet_web_url # If the preparation failed before setting tweet.tweet_web_url imma kms\n log.error('Failed to prepare embed for ' + str(tweet._json))\n\n # Make sure we're ready to send messages\n await self.bot.wait_until_ready()\n\n for channel in chan_conf.discord_channels:\n discord_channel = self.bot.get_channel(channel.id)\n\n # Check if the channel still exists\n if discord_channel is None:\n log.error('Channel {} unavailable to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n continue\n\n # Check for required permissions\n perms = discord_channel.permissions_for(discord_channel.server.me)\n if not perms.embed_links:\n log.warning('Improper permissions in channel {} to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n try:\n warning = '\\N{WARNING SIGN} Missed tweet from {} : `Embed links` permission missing. \\N{WARNING SIGN}'.format(tweet.author.screen_name)\n await self.bot.send_message(discord_channel, warning)\n except discord.DiscordException as e:\n log.error('Could not send warning to channel {}.\\n{}'.format(discord_channel.id, e))\n continue\n\n # Send the embed to the appropriate channel\n log.debug('Scheduling Discord message on channel ({}) : {}'.format(channel.id, tweet.text))\n await self.bot.send_message(discord_channel, content=content, embed=embed)\n\n # Update stats and latest id when processing newer tweets\n if tweet.id > chan_conf.latest_received:\n channel.received_count += 1\n chan_conf.latest_received = tweet.id\n self.conf.save()",
"def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending notification in Slack\")",
"async def admin(ctx):\n info = await(bot.application_info())\n mention = info.owner.mention\n message = \"My administrator is the glorious {}. Fear them, for they are mighty.\".format(mention)\n await(ctx.send(message))",
"async def send_dm(user, message, embed=None):\n\n if type(user) is discord.User or type(user) is discord.Member:\n if user.dm_channel is None:\n await user.create_dm()\n\n await user.dm_channel.send(message, embed=embed)",
"async def on_message(self, message: discord.Message):\n \n if message.author.bot:\n return\n \n else:\n if message.channel == message.author.dm_channel:\n time_difference = (datetime.utcnow() - self.last_timeStamp).total_seconds()\n\n if time_difference < 5:\n return await message.channel.send(\"You are on cooldown!\")\n \n self.channel_id = 857690925810319381\n self.modmail_channel = self.bot.get_channel(self.channel_id)\n embed = discord.Embed(\n title = f\"Modmail From `{message.author}`\", \n description = f\"{message.content}\", \n color = 0x2c2f33\n )\n if message.attachments:\n embed.set_image(url=message.attachments[0].url)\n embed.set_footer(text=f'ID: {message.author.id}')\n\n await self.modmail_channel.send(embed=embed)\n await message.channel.send('Your message has been sent!', delete_after = 7)\n self.last_timeStamp = datetime.utcnow()",
"def send_status_update(self, agent_id: str, status: str):\n status_packet = Packet(\n packet_type=PACKET_TYPE_UPDATE_STATUS,\n subject_id=agent_id,\n data={\n \"status\": status,\n },\n )\n self._get_channel_for_agent(agent_id).enqueue_send(status_packet)",
"def scheduled_actions():\n\n # Ignore on saturdays and sundays.\n now = datetime.today()\n if now.weekday() >= 5:\n return\n\n state = State.get()\n channel_id = state.channel_id()\n\n # Morning announcement for participants to join game.\n start = datetime.combine(date.today(), MORNING_ANNOUNCE)\n end = start + MORNING_ANNOUNCE_DELTA\n if now >= start and now < end and not state.morning_announce():\n resp = client.api_call(\"chat.postMessage\", channel=channel_id,\n text=\"<!channel> Remember to join today's game before 11:50 by using `!join` or :+1: \"\n \"reaction to this message!\")\n state.set_morning_announce(resp[\"ts\"])\n state.save()\n\n # Reminder announcement for remaining participants to join game.\n start = datetime.combine(date.today(), REMINDER_ANNOUNCE)\n end = start + REMINDER_ANNOUNCE_DELTA\n if now >= start and now < end and not state.reminder_announce():\n scores = Scores.get()\n remaining = scores.recent_users(7) - set(state.participants())\n if len(remaining) == 0:\n print(\"No one to remind!\")\n # Something that won't match timestamp but still isn't None.\n state.set_reminder_announce(1)\n else:\n fmt = \", \".join([\"<@{}>\".format(uid) for uid in remaining])\n resp = client.api_call(\"chat.postMessage\", channel=channel_id,\n text=\"{} Remember to join today's game before 11:50 by using `!join` or :+1: \"\n \"reaction to this message!\".format(fmt))\n state.set_reminder_announce(resp[\"ts\"])\n state.save()\n elif now > end and state.reminder_announce():\n print(\"Clearing reminder announce\")\n state.set_reminder_announce(None)\n state.save()\n\n # Midday announcement of game.\n start = datetime.combine(date.today(), MIDDAY_ANNOUNCE)\n end = start + MIDDAY_ANNOUNCE_DELTA\n if now >= start and now < end and not state.midday_announce():\n state.set_midday_announce(True)\n state.save()\n create_matches()\n elif now > end and state.midday_announce():\n print(\"Clearing midday announce\")\n state.set_midday_announce(False)\n state.save()",
"def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")",
"async def ad_reminder(self):\r\n for guild, settings in self.bot.settings.items():\r\n if settings[\"ad_reminder_channel_id\"]:\r\n guild = self.bot.get_guild(int(guild))\r\n embed = discord.Embed(\r\n title=\"Advertising Reminder\",\r\n color=guild.me.color\r\n )\r\n # Disboard - every 2 hours\r\n if settings[\"ad_reminder_disboard\"] and datetime.now().hour % 2 == 0:\r\n embed.add_field(\r\n name='Disboard',\r\n value=f'`every 2 hours`\\nBump at [WEBSITE](https://disboard.org/server/{guild.id}) \\nor with <@302050872383242240>:\\n`!d bump`'\r\n )\r\n # Disforge - every 3 hours\r\n if settings[\"ad_reminder_disforge\"] and datetime.now().hour % 3 == 0:\r\n embed.add_field(\r\n name='Disforge',\r\n value=f'`every 3 hours`\\nBump at [WEBSITE](https://disforge.com/dashboard)'\r\n )\r\n # Discord.me\r\n if settings[\"ad_reminder_discordme\"] and datetime.now().hour % 6 == 0:\r\n embed.add_field(\r\n name='Discord.me',\r\n value=f'`every 6 hours`\\nBump at [WEBSITE](https://discord.me/dashboard)'\r\n )\r\n # discordservers\r\n if settings[\"ad_reminder_discordservers\"] and datetime.now().hour % 12 == 0:\r\n embed.add_field(\r\n name=\"discordservers\",\r\n value=f'`every 12 hours`\\nBump at [WEBSITE](https://discordservers.com/panel/{guild.id}/bump)'\r\n )\r\n # top.gg\r\n if settings[\"ad_reminder_topgg\"] and datetime.now().hour % 12 == 0:\r\n embed.add_field(\r\n name=\"top.gg\",\r\n value=f'`every 12 hours`\\nBump at [WEBSITE](https://top.gg/servers/{guild.id}/vote)'\r\n )\r\n \r\n\r\n if embed.fields:\r\n role = guild.get_role(int(settings[\"ad_reminder_role_id\"]))\r\n await guild.get_channel(\r\n int(settings[\"ad_reminder_channel_id\"])\r\n ).send(content=role.mention if role else None, embed=embed)",
"def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)",
"async def tick(self):\n need_to_save_reminders = False\n while self.reminders:\n reminded: bool = False\n # First element, but is stored as tuple\n reminder: Reminder = self.reminders[0][1]\n reminder_time: arrow.Arrow = arrow.Arrow.utcfromtimestamp(reminder.reminder_utc_timestamp)\n time_now: arrow.Arrow = arrow.utcnow()\n if reminder_time < time_now:\n reminder = heappop(self.reminders)[1]\n need_to_save_reminders = True\n reminded = True\n person: discord.User = await self._get_user_by_id(reminder.user_id)\n logger.info(f\"Attempting to remind {person.name} of: {reminder.message}\")\n channel: discord.TextChannel = await self._get_channel_by_id(reminder.channel_id)\n # Reminder was done using bot command\n if channel and person and reminder.message_id:\n message: discord.Message = await channel.fetch_message(reminder.message_id)\n await person.send(f\"{message.jump_url}\\nYou wanted to be reminded of: {reminder.message}\")\n # Reminder was done using slash command\n elif person and channel:\n # Send the reminder text\n await channel.send(f\"{person.mention} You wanted to be reminded of: {reminder.message}\")\n if not reminded:\n break\n\n # Save reminder to file because we did remind a person now\n if need_to_save_reminders:\n await self.save_reminders()",
"async def batphone(self, ctx, *, message: add_est_timestamp = None):\n\n if not message:\n return ctx.send(f\"\"\"{ctx.author.mention} I'm already doing 90% of the work. \n Do you want me to come up with the message too?\"\"\")\n\n status = twitterapi.post_tweet(message)\n\n embed = discord.Embed(title='Batphone',\n url=f'https://twitter.com/AmtrakEq/status/{status.id}',\n description=message,\n colour=discord.Colour.red())\n embed.set_image(url=gifs.get_one_gif(\"thomas the train\"))\n embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)\n\n await ctx.bot.batphone_channel.send(f'@everyone {status.text}', embed=embed)\n await ctx.message.delete()",
"async def alive(self, ctx):\n now = datetime.now()\n delta = now - runtime\n time = str(timedelta(seconds=delta.seconds)).split(\":\")\n days = \"\" if delta.days == 0 else str(delta.days) + \" days, \"\n hours = \"\" if time[0] == \"0\" else time[0] + \" hours, \"\n minutes = \"\" if time[1] == \"00\" else time[1] + \" minutes and \"\n await ctx.send(embed=discord.Embed(\n title=\"**Bot is alive!**\\n\",\n colour=discord.Color.green(),\n description=\"Python version: *3.9.0*\\n\"+\n \"discord.py version: *1.0.1*\\n\"+\n \"Repository: https://github.com/Vethya/Saitomo\\n\"+\n f\"Uptime: **{days}{hours}{minutes}{time[2]} seconds**\"\n )\n )",
"async def _discord(self, ctx: Context):\n\n await ctx.send(\n f\"You can join the Brawlcord community server by using this link: {COMMUNITY_SERVER}\"\n )",
"async def status(self, context):\n await self.send_message(context, await self.status_msg_packed(context))",
"async def _bot(ctx):\n await ctx.send('Yes, the bot is cool.')",
"def handle_command(command, person, job_status):\n # time to set schedule on Sundays\n hour = 20; error_message = True\n\n if person.status == 'active' and person.name in ACTIVE_USERS:\n workout.during(person)\n elif command == 'summary' and person.name in ACTIVE_USERS:\n person.summary_report()\n elif not person.routine and person.name in ACTIVE_USERS:\n time_str = str(hour) + ':00'\n message = 'hey @' + person.name + ', it looks like you don\\'t a schedule set up yet. we\\'ll do that for you ' \\\n + 'on *sunday* at *' + person.get_local_time(time_str) + '*.'\n person.client.api_call('chat.postMessage', channel = person.channel, text = message, as_user = True, link_names = 1)\n person.my_schedule.schedule.every().sunday.at(time_str).do(set_routine, person)\n # person.my_schedule.schedule.every(1).minutes.do(set_routine, person)\n job_status = True\n else:\n message = 'we\\'re still building out our functionality. come back in a few weeks.'\n person.client.api_call('chat.postMessage', channel=person.channel, text=message, as_user=True)\n return job_status",
"def call_schedule(self, bot, update):\n bot.send_message(update.message.chat_id, '_1 пара_ 08:30 - 10:05\\n'\n '_2 пара_ 10:25 - 12:00\\n'\n '_3 пара_ 12:20 - 13:55\\n'\n '_4 пара_ 14:15 - 15:50\\n'\n '_5 пара_ 16:10 - 17:45',\n parse_mode='Markdown')",
"async def status(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n up_time = time.time() - self.start_time\n m, s = divmod(up_time, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n stime = time.time() - psutil.boot_time()\n m, s = divmod(stime, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n system_uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n mem = psutil.virtual_memory()\n\n pid = os.getpid()\n memory_use = psutil.Process(pid).memory_info()[0]\n\n content = discord.Embed(title=f\"Miso Bot | version {main.version}\")\n content.set_thumbnail(url=self.client.user.avatar_url)\n\n content.add_field(name=\"Bot process uptime\", value=uptime_string)\n content.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent()}%\")\n content.add_field(name=\"System uptime\", value=system_uptime_string)\n\n content.add_field(name=\"System RAM Usage\", value=f\"{mem.percent}%\")\n content.add_field(name=\"Bot memory usage\", value=f\"{memory_use/math.pow(1024, 2):.2f}MB\")\n\n await ctx.send(embed=content)",
"async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)"
]
| [
"0.6610289",
"0.64141685",
"0.6388113",
"0.6227487",
"0.6192259",
"0.5966803",
"0.59522605",
"0.59513205",
"0.59512687",
"0.5911446",
"0.5883258",
"0.5881281",
"0.5862279",
"0.5821707",
"0.57725155",
"0.57487243",
"0.5716497",
"0.56860787",
"0.566197",
"0.56558305",
"0.5654359",
"0.56406146",
"0.5637458",
"0.56348646",
"0.56037354",
"0.56015384",
"0.56005913",
"0.5598239",
"0.5595232",
"0.55778867"
]
| 0.7905984 | 0 |
Schedule setting the status of the bot | async def schedule_activity():
if controller.scheduled_activity_date is not None:
return
controller.scheduled_activity_date = datetime.now()+timedelta(seconds=30)
await wait_until(controller.scheduled_activity_date)
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='for ~command')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def schedule_status():\n while True:\n if controller.scheduled_status_date is not None:\n return\n controller.scheduled_status_date = datetime.now()+timedelta(hours=23)\n await wait_until(controller.scheduled_status_date)\n channel = await client.fetch_channel(Guard.AUTHOR_DM)\n await channel.send(**{\n 'content': controller.get_status(),\n })\n controller.scheduled_status_date = None",
"async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)",
"async def change_status():\n await client.change_presence(activity=discord.Game(next(appearance.status)))",
"async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)",
"async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")",
"async def rndactivity_add_watching(self, ctx: commands.Context, *, status: str):\n await self._add_status(ctx, status, game_type=3)",
"def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)",
"def status_task():\n props = [\n (STAT_TIME, current_time),\n (STAT_CONDITION, weather_condition)\n ]\n\n # Send the status request with the current time and condition.\n send_status_request(props)\n\n # Create and start a timer to repeat this task periodically.\n t = Timer(report_interval, status_task)\n t.setDaemon(True)\n t.start()",
"def set_On(self):\n if not(self._locked):\n self.__dict__['statusOn']=True\n self._do_action()\n else:\n self._log.info('The JobProperty %s is blocked', self.__name__)",
"def UpdateStatus(self, status):\r\n self.status.update(status)",
"def SetStatus(self, status):\r\n self.status = status",
"async def status(self, ctx, *, status_type: str.lower):\n if status_type == \"clear\":\n self.bot.config.remove(\"status\")\n await self.bot.config.update()\n await self.set_presence()\n embed = Embed(title=\"Status Removed\", color=self.bot.main_color)\n return await ctx.send(embed=embed)\n status_type = status_type.replace(\" \", \"_\")\n\n status, msg = (\n await self.set_presence(status_identifier=status_type, status_by_key=True)\n )[\"status\"]\n if status is None:\n raise commands.MissingRequiredArgument(SimpleNamespace(name=\"status\"))\n\n self.bot.config[\"status\"] = status.value\n await self.bot.config.update()\n\n embed = Embed(\n title=\"Status Changed\", description=msg, color=self.bot.main_color\n )\n return await ctx.send(embed=embed)",
"def updateStatus(self, status):\n pass",
"async def set_timer(self, ctx: commands.Context, seconds: int = None):\n if seconds is not None:\n await ctx.cfg_guild.autopostseconds.set(seconds)\n await ctx.send(\"Auto-post timer has been set to {}\".format(seconds))\n else:\n seconds = await ctx.cfg_guild.autopostseconds()\n await ctx.send(f\"Currently posting every {seconds} seconds.\")",
"async def async_turn_on(self):\n path = \"/queue/simple\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"queue\"]:\n if self._ctrl.data[\"queue\"][uid][\"name\"] == f\"{self._data['name']}\":\n value = self._ctrl.data[\"queue\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()",
"def set_status(self, status):\n if status == \"offline\":\n self._status.set_message(\"N\")\n self._status.set_foreground_color(\"red\")\n \n elif status == \"online\":\n self._status.set_message(\"Y\")\n self._status.set_foreground_color(\"Green\")\n \n elif status == \"away\":\n self._status.set_message(\"A\")\n self._status.set_foreground_color(\"Grey\")\n \n elif status == \"busy\":\n self._status.set_message(\"B\")\n self._status.set_foreground_color(\"Yellow\")",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"async def on_ready():\n await bot.change_presence(status=discord.Status.online, activity=discord.Game(activity))",
"def setstatus(self, status):\n with self.lock:\n self.status = status",
"def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status",
"def update_status(self) -> None:\n try:\n (rc, mid) = self.mqttc.publish(\n self.config.status_topic, json.dumps(self.status), qos=0, retain=False\n )\n if rc == mqtt.MQTT_ERR_SUCCESS:\n logging.info(\n f\"The request for a status update has been successfully accepted: mid={mid}\"\n )\n else:\n logging.warning(\"The request for a status update has been rejected\")\n except ValueError as e:\n logging.warning(f\"Cannot send status update: {e}\")",
"def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):\n print \"status changed for\",username",
"def send_robot_status(self, robot_status):\n self.robot_status_sender.send(robot_status)",
"def set_status(self, status):\n # TODO log to db\n self.status = status",
"def call_schedule(self, bot, update):\n bot.send_message(update.message.chat_id, '_1 пара_ 08:30 - 10:05\\n'\n '_2 пара_ 10:25 - 12:00\\n'\n '_3 пара_ 12:20 - 13:55\\n'\n '_4 пара_ 14:15 - 15:50\\n'\n '_5 пара_ 16:10 - 17:45',\n parse_mode='Markdown')",
"def change_status(self, status, application_id):",
"def mark_running(self):\r\n self.status = RUNNING",
"def status(self, status):\n self._status = status"
]
| [
"0.754021",
"0.64982355",
"0.6476354",
"0.6317739",
"0.6290572",
"0.61519027",
"0.61437833",
"0.6137482",
"0.60466665",
"0.5946994",
"0.59342116",
"0.5927603",
"0.59186566",
"0.5887395",
"0.5886717",
"0.5883357",
"0.5882372",
"0.5882372",
"0.5882372",
"0.588211",
"0.588192",
"0.58699137",
"0.58472985",
"0.5839499",
"0.5835693",
"0.58259934",
"0.5815979",
"0.58113205",
"0.5808686",
"0.578913"
]
| 0.7197702 | 1 |
Returns True if the command exists and is enabled | def is_enabled(command):
if command not in Controller.commands:
return False
return Controller.commands[command][2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkIfEnabled(self):\n\n # Reload the command file to check for new commands\n importlib.reload(BotSettings)\n matches = BotSettings.config['commands']\n\n # Check for the match and if it is there return the value that goes with the command\n for key in matches:\n key.strip(\"!\")\n if key == self.command:\n return matches.get(key)\n\n # If reached the command does not exist\n return False",
"def __is_active(self, command):\n return True",
"def has_command(self, command):\n for pbt in self._plugins.values():\n if pbt.command == command:\n return True\n return False",
"def is_configured(command):\n return command in COMMANDS",
"def has_commands(self) -> bool:\n return len(self.commands) > 0",
"def command_registered(self, command: str) -> bool:\n return command in self._commands",
"def has_command_with_name(self, command_name):\n return command_name in self.commands",
"def enable_execute_command(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_execute_command\")",
"def __commandExists(self, command, cmdtype):\n try:\n # method exists\n if hasattr(self, self.__getFullCommandName(command, cmdtype)):\n # command handler type exists\n if self.__commandHandlerTypeExists(cmdtype):\n return True\n else:\n return False\n else:\n return False\n # any key does not exist\n except KeyError:\n return False",
"def is_available(self):\n try :\n p = subprocess.Popen([self.program_path, self.help_argument],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n return p.wait() == self.help_return_code\n except OSError:\n return False",
"def _is_installed(self) -> bool:\n try:\n sh.Command(self._file_full_path)\n return True\n except sh.CommandNotFound:\n return False",
"def check_command(self):\n return self.process is not None and self.process.poll() is None",
"def is_enabled(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-enabled').succeeded",
"def has_oam_cli(self):\n try:\n r = self.scripts.get_oam_status()\n except Exception:\n r = False\n return bool(r)",
"def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False",
"def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False",
"def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes",
"def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None",
"def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False",
"def is_cmd(self, name):\n \n return name in self.cmds",
"def check_commands(self):\n pass",
"def is_enabled(self):",
"def enabled(name, **kwargs):\n if not available(name):\n log.error(\"Service %s not found\", name)\n return False\n\n run_file = os.path.join(SERVICE_DIR, name, \"run\")\n down_file = os.path.join(SERVICE_DIR, name, \"down\")\n\n return (\n os.path.isfile(run_file)\n and os.access(run_file, os.X_OK)\n and not os.path.isfile(down_file)\n )",
"def is_on(self):\n return self._program.get(\"enabled\") is True",
"def is_cli_installed(self):\n p = subprocess.Popen([\"which\", \"lpass\"])\n p.communicate()\n\n if p.returncode != 0:\n return False\n\n return True",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def is_installed(cmd):\n rc, _, _ = execute(\"which %s\" % cmd, die=False)\n if rc:\n return False\n else:\n return True",
"def test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())",
"def do_known_command(self, cmd):\n if cmd in self.commands:\n return \"true\", True\n else:\n return \"false\", True",
"def command_exists(name, path=None):\n if path is None:\n path = sys.path\n\n for prefix in path:\n filename = os.path.join(prefix, name)\n is_executable = os.access(filename, os.X_OK)\n is_file = os.path.isfile(filename)\n if is_executable and is_file:\n return True\n\n return False"
]
| [
"0.8009495",
"0.73429406",
"0.72534823",
"0.71589005",
"0.71207994",
"0.70904523",
"0.7001283",
"0.68857443",
"0.6817424",
"0.68012553",
"0.6798277",
"0.67917633",
"0.6757153",
"0.6734743",
"0.6718272",
"0.667678",
"0.66564655",
"0.65755635",
"0.65725994",
"0.655036",
"0.6549372",
"0.65479875",
"0.6538313",
"0.653753",
"0.6535578",
"0.6532034",
"0.65107626",
"0.64934045",
"0.64771295",
"0.64593506"
]
| 0.80444074 | 0 |
Returns the wikitext of the specified item. This method handles redirects as well. | async def get_wikitext(item):
item = item.strip()
url = Controller.WIKI_API_REV_URL + item
response = await Controller.http_get(url)
try:
pages = json.loads(response)['query']['pages']
key = list(pages.keys())[0]
if key == '-1':
raise ValueError('Page not found')
wikitext = pages[key]['revisions'][0]['slots']['main']['*']
while wikitext.startswith('#REDIRECT'):
item = re.findall('([^[\]]+)(?:\]|$)', wikitext[len('#REDIRECT'):].strip())[0]
url = Controller.WIKI_API_REV_URL + item
response = await Controller.http_get(url)
pages = json.loads(response)['query']['pages']
key = list(pages.keys())[0]
wikitext = pages[key]['revisions'][0]['slots']['main']['*']
return wikitext
except ValueError as e:
raise
except Exception as e:
logging.info(response)
logging.error(e)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetItemText(self, item):\r\n\r\n return item.GetText()",
"def _getText(self, item): # TODO: move this method to more suitable place when possible (scripting base class)\r\n if not isinstance(item, basestring):\r\n return item\r\n\r\n translation = self.phone.getTranslation(item)\r\n if translation != None:\r\n return translation\r\n\r\n return item",
"def get_text(item_id):\n if item_id in all_items:\n return all_items[item_id]['text']\n return None",
"def get_full_text(self, item):\n text_content = self.db.plugin_text_text(item_id=item.unique_id)\n output = self.response.render(\n 'plugin_text/full_text.txt',\n dict(text_content=text_content, item=item))\n return unicode(output.decode('utf-8'))",
"def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''",
"def get_full_text(self, item):\n photoset_content = self.db.plugin_photoset_content(\n item_id=item.unique_id)\n output = self.response.render(\n 'plugin_photoset/full_text.txt',\n dict(photoset_content=photoset_content, item=item))\n return unicode(output.decode('utf-8'))",
"def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''",
"def OnGetItemText(self, item, col):\n\n return self.get_item_text(item, col)",
"def get_text(self):",
"def get_text(self):\n return self.res.text",
"def get_text(self) -> str:\n return self.text",
"def GetItemText(self, item, column=None):\r\n\r\n if self.IsVirtual():\r\n return self._owner.OnGetItemText(item, column)\r\n else:\r\n return item.GetText(column)",
"def get_text(self):\n return self.text",
"def getInfo(self,item):\r\n return ''",
"def get_contents(self, item):\n return self.decrypt(item.fullpath)",
"def get_item_txt(self, mediaId):\n headers = { 'Authorization' : self.client.authorization_header }\n\n response = requests.get(\n self.client.url + '/media/' + mediaId + '/transcript/text',\n headers = headers\n )\n \n return(response.text)",
"def text(self):\n txt = self.web_element.text\n return txt",
"def Item(self) -> str:",
"def Item(self) -> str:",
"def Item(self) -> str:",
"def get_text(self) -> str:\n return self._text",
"def _get_item_string(\n cls,\n item: CmdTemplate,\n json: bool = False,\n ) -> str:\n return misc_utils.get_cmd_template_string(item, json)",
"def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, no_punc=True) # with symbols\n # return title + content # without symbols",
"def OnGetItemText(self, item, column):\r\n \r\n return \"\"",
"def post_get_convert(self, site, getText):\n return getText",
"def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()",
"def get_text(self):\n c=self.xpath_eval(\"ns:*\")\n if not c:\n self.upgrade()\n t=self.xpath_eval(\"ns:text\")\n if not t:\n return None\n return from_utf8(t[0].getContent())",
"async def getTroubleText(self):\n trouble_text = await self.director.getItemVariableValue(\n self.item_id, \"TROUBLE_TEXT\"\n )\n return trouble_text",
"def plain_text(self) -> str:\n return pulumi.get(self, \"plain_text\")",
"def GetText(self):\r\n \r\n return self._text"
]
| [
"0.71310467",
"0.6846198",
"0.6360924",
"0.63444716",
"0.6208977",
"0.6193657",
"0.6024532",
"0.5965504",
"0.592966",
"0.589569",
"0.5876253",
"0.5847495",
"0.58415383",
"0.5787497",
"0.57604456",
"0.5738865",
"0.5703757",
"0.5667341",
"0.5667341",
"0.5667341",
"0.56493497",
"0.5630111",
"0.56225455",
"0.5620539",
"0.5607609",
"0.5602867",
"0.55936867",
"0.5589526",
"0.55832857",
"0.55772793"
]
| 0.79088044 | 0 |
Returns the canonical title for the given title, if found | async def canonical_title(title):
url = Controller.WIKI_API_SEARCH_URL + title
response = await Controller.http_get(url)
try:
pages = json.loads(response)['query']['search']
if len(pages) == 0:
return None
for page in pages:
if page['title'].lower() == title.lower():
return page['title']
return pages[0]['title']
except:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _resolve_title(self, title):\n try:\n return config_data.get(\"Website\", title)\n except Exception as e:\n print(\"Title URL not configured\")\n raise e",
"def normalize_title(self, title):\n return \" \".join(w[0].capitalize() + w[1:] for w in title.split())",
"def get_link_title(properties):\n if not properties:\n return \"Unknown title\"\n\n # Try plausible fields for link titles.\n possible_title_field_names = ['name', 'title', 'heading', 'main']\n for title in possible_title_field_names:\n for k in properties.keys():\n if str.upper(title) in str.upper(str(k)):\n return properties[k]\n\n # Fall back to the first items in the dict.\n return ' '.join([str(a) for a in properties.items()[0]])",
"def alias_by_title(self, title):\n logging.debug('alias_by_title(%s)', title)\n for alias, a_title in self.config['alias'].items():\n if a_title == title:\n return alias\n\n return ''",
"def _parse_title(self, links):\n for link in links:\n if \"hearing\" in link[\"title\"].lower():\n return link[\"title\"].replace(\"Notice\", \"\").strip()\n if \"special\" in link[\"title\"].lower():\n return \"Special Meeting\"\n return \"Illinois Medical District Commission\"",
"def safe_title(self):\n try:\n return self.title\n except ObjectDoesNotExist:\n return None",
"def getPredictedAnchor(title: str) -> str:\n title = title.lower()\n if title.startswith('npj '):\n return 'npj series'\n title = re.sub(r'^(the|a|an|der|die|das|den|dem|le|la|les|el|il)\\s+', '',\n title)\n return title[0].upper()",
"def normalise_title(title):\n normalised = title.lower()\n if normalised.startswith('the '):\n normalised = normalised[4:]\n normalised = re.sub('[^a-z ]', '', normalised)\n normalised = re.sub(' +', ' ', normalised)\n normalised = normalised.replace(' the ', ' ')\n return normalised",
"def get_title(url):\r\n import chardet\r\n if not url or not url.startswith('http://'): return None\r\n try:\r\n content = urlopen(url).read()\r\n t = r_title.findall(content)\r\n if t:\r\n title = t[0].strip()\r\n en = (r_charset.findall(content) or\r\n r_encoding.findall(content))\r\n encoding = en[0] if en else chardet.detect(content)[\"encoding\"]\r\n if encoding:\r\n title = unicode(title, encoding).encode(\"utf-8\")\r\n return title\r\n except: return None",
"def get_unified_journal_title(journal_full_title):\n\n return mappings.JOURNAL_MAPPINGS.get(journal_full_title, journal_full_title)",
"def correct_cap(title):\n try:\n fl = fln[title]\n return title\n except:\n #capitalize first letter only\n try:\n fl = fln[title[0].upper() + title[1:]]\n return title[0].upper() + title[1:]\n except:\n #try title case\n try:\n fl = fln[title.title()]\n return title.title()\n except KeyError:\n return \"\"",
"def sanitize_title(title):\n # Discard everything after the colon\n title = title.split(':')[0]\n title.replace('.', '')\n return title.lower()",
"def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and ':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title",
"def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title",
"def get_valid_title(title):\n if len(title) >= 254:\n title = title[:254]\n return title",
"def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title",
"def slugify_title(title, datetimeon):\n year, month = datetimeon.year, datetimeon.month\n the_slug_start = f'{year}/{month}/'\n max_length = 200 - len(the_slug_start)\n return the_slug_start + slugify(title, max_length=max_length)",
"def get_title():",
"def get_title(content):\n content = content[:100000]\n pa = re.compile(\"<title.*?>(.*?)<\\/title>\", re.DOTALL | re.IGNORECASE)\n match = re.search(pa, content)\n title = \"\"\n if match != None:\n title_found = match.group(1)\n title = title_found.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \" \")\n return title",
"def sortable_title(portal, title):\n if not title:\n return ''\n\n def_charset = portal.plone_utils.getSiteEncoding()\n sortabletitle = title.lower().strip()\n # Replace numbers with zero filled numbers\n sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle)\n # Truncate to prevent bloat\n for charset in [def_charset, 'latin-1', 'utf-8']:\n try:\n sortabletitle = unicode(sortabletitle, charset)[:30]\n sortabletitle = sortabletitle.encode(def_charset or 'utf-8')\n break\n except UnicodeError:\n pass\n except TypeError:\n # If we get a TypeError if we already have a unicode string\n sortabletitle = sortabletitle[:30]\n break\n return sortabletitle",
"def __calculate_slug(self):\n\n return slugify.slugify(self.metadata['title'])",
"def getSortTitle(dictList):\n\ttitle = dictList['title'].lower().strip()\n\tfirstword = title.split(\" \",1)[0]\n\tif firstword in ['a', 'an', 'the']:\n\t\ttitle = title.split(firstword, 1)[-1]\n\treturn title.strip()",
"def get_title(self) -> Optional[str]:\n return self.title",
"def clean_title(title):\n title = re.sub(\"\\n\", \"\", title) # Remove newlines\n title = ' '.join(title.split()) # Turn multiple whitespaces into a single one\n title = title.lower() # Make everything lowercase\n return title",
"def get_title(self, article):\r\n\r\n title = ''\r\n doc = article.doc\r\n\r\n title_element = self.parser.getElementsByTag(doc, tag='title')\r\n # no title found\r\n if title_element is None or len(title_element) == 0:\r\n return title\r\n\r\n # title elem found\r\n title_text = self.parser.getText(title_element[0])\r\n used_delimeter = False\r\n\r\n # split title with |\r\n if '|' in title_text:\r\n title_text = self.split_title(title_text, PIPE_SPLITTER)\r\n used_delimeter = True\r\n\r\n # split title with -\r\n if not used_delimeter and '-' in title_text:\r\n title_text = self.split_title(title_text, DASH_SPLITTER)\r\n used_delimeter = True\r\n\r\n # split title with »\r\n if not used_delimeter and u'»' in title_text:\r\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\r\n used_delimeter = True\r\n\r\n # split title with :\r\n if not used_delimeter and ':' in title_text:\r\n title_text = self.split_title(title_text, COLON_SPLITTER)\r\n used_delimeter = True\r\n\r\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\r\n return title",
"def title(value):\r\n title_word = lambda w: w if RE_UPPERCASE.search(w) else old_title(w)\r\n return re.sub('(\\S+)', lambda m: title_word(m.group(0)), value)",
"def _normalize_title(self, article_title):\n morph = pymorphy2.MorphAnalyzer()\n title_words = article_title.lower().strip(\n string.punctuation + \"«»\"\n ).split(' ')\n title_morphs = (morph.parse(word)[0] for word in title_words)\n title_nouns = filter(\n lambda word_morph: 'NOUN' in word_morph.tag, title_morphs\n )\n return collections.Counter(\n map(lambda noun: noun.normal_form, title_nouns)\n )",
"def getTitle(self, article):\n \n title = ''\n doc = article.doc\n \n titleElem = Parser.getElementsByTag(doc, tag='title')\n # no title found\n if titleElem is None or len(titleElem) == 0:\n return title\n \n # title elem found\n titleText = Parser.getText(titleElem[0])\n usedDelimeter = False\n \n # split title with |\n if '|' in titleText:\n titleText = self.doTitleSplits(titleText, PIPE_SPLITTER)\n usedDelimeter = True\n \n # split title with -\n if not usedDelimeter and '-' in titleText:\n titleText = self.doTitleSplits(titleText, DASH_SPLITTER)\n usedDelimeter = True\n \n # split title with »\n if not usedDelimeter and u'»' in titleText:\n titleText = self.doTitleSplits(titleText, ARROWS_SPLITTER)\n usedDelimeter = True\n \n # split title with :\n if not usedDelimeter and ':' in titleText:\n titleText = self.doTitleSplits(titleText, COLON_SPLITTER)\n usedDelimeter = True\n \n title = MOTLEY_REPLACEMENT.replaceAll(titleText)\n return title",
"def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False",
"def getTitle(self):\n\n # print(self.soupObject.title.string)\n try:\n s = self.soupObject.find(\"meta\", attrs={\"name\": \"twitter:title\"})\n self.title = str(s['content'])\n self.title = self.title.replace(\"/\", \"\")\n self.title = self.title.strip()\n if not self.title:\n s = int(\"deliberateError\")\n\n # except\n except:\n self.title = \"Amazonsubtitles\"\n\n pass"
]
| [
"0.68825155",
"0.66403437",
"0.6610627",
"0.66068155",
"0.6586052",
"0.65578806",
"0.65400726",
"0.64582205",
"0.64436924",
"0.6440577",
"0.6421765",
"0.63735795",
"0.6332046",
"0.6319642",
"0.628051",
"0.6260701",
"0.62309647",
"0.6217494",
"0.6215482",
"0.62082005",
"0.6205053",
"0.62001544",
"0.61971205",
"0.6191358",
"0.61893165",
"0.6184877",
"0.6179433",
"0.6174711",
"0.6139847",
"0.6135678"
]
| 0.8441544 | 0 |
Replies the user with the wikilink for the specified item | async def link(self, msg, item=None, *args):
if not Guard.has_permission(msg, 'embed_links'):
await msg.channel.send(**{
'content': 'Cannot send links on this channel',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
if not item:
return
if args:
item = f'{item} {" ".join(args)}'
title = await Controller.canonical_title(item)
if title is None:
await msg.channel.send(**{
'content': f'There are no pages matching `{item}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
page_url = Controller.link_from_title(title)
await msg.channel.send(**{
'content': page_url,
'reference': msg.to_reference(),
'mention_author': True,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def item_link(self, obj):\n if obj.item is None:\n return '\\N{EM DASH}'\n\n return format_html(\n '<a href=\"{}\">{}</a>',\n reverse('admin:mediaplatform_mediaitem_change', args=(obj.item.pk,)),\n obj.item.title if obj.item.title != '' else '[Untitled]'\n )",
"async def recipe(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'I need embed_links permission to answer in this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n canonical = await Controller.canonical_title(item)\n if canonical:\n item = canonical\n page_url = Controller.link_from_title(item)\n if item == 'BelAZ':\n content = f'To complete the mission \"Moving Town\" to get BelAZ, you need:\\n'\n content += '• Nuclear reactor part x3\\n'\n content += '• Clean water x1,000\\n'\n content += '• Steel tools x10\\n'\n content += '• Auto spare parts x500\\n'\n content += '• Iron pipe x1,000\\n'\n content += '• Car battery x50\\n'\n content += '• Scrap x100,000\\n'\n content += '• Steel x50,000\\n'\n content += '• Lead x100,000\\n'\n content += '• Nail x10,000\\n'\n content += '• Wire x10,000\\n'\n content += '• Insulating tape x1,000'\n await msg.channel.send(**{\n 'content': content,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })\n return\n try:\n wikitext = await Controller.get_wikitext(item)\n except ValueError as e:\n # Means the page is not found\n await msg.channel.send(**{\n 'content': f'No page found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n try:\n emojis = {emoji.name.lower(): f'<:{emoji.name}:{emoji.id}> ' for emoji in msg.guild.emojis if emoji.available}\n for k, v in list(emojis.items()):\n emojis[k+'s'] = v\n except:\n emojis = {}\n parsed = WTP.parse(wikitext)\n content = None\n template_names = []\n for template in parsed.templates:\n template_names.append(template.name.strip())\n if template.name.strip().lower() == 'recipe':\n args = template.arguments\n logging.info(args)\n ingredients = []\n tools = []\n level = []\n points = []\n def parse_args(args):\n idx = 0\n while idx < len(args):\n arg = args[idx].string.strip(' |')\n if arg == '':\n idx += 1\n continue\n if '=' not in arg:\n amount = args[idx+1].string.strip(' |')\n if not amount or amount != '0':\n ingredients.append(f'{emojis.get(arg.lower().replace(\" \", \"_\"), \"\")}{arg.capitalize()} x{amount}')\n else:\n ingredients.append(f'{emojis.get(arg.lower().replace(\" \", \"_\"), \"\")}{arg.capitalize()}')\n idx += 1\n elif arg.startswith('Tool'):\n tools.append(arg.split('=', maxsplit=1)[1].strip().capitalize())\n elif arg.startswith('input'):\n templates = WTP.parse(arg.split('=', maxsplit=1)[1].strip()).templates\n if len(templates) > 0:\n parse_args(templates[0].arguments)\n elif arg.startswith('level'):\n try:\n level.append(int(arg.split('=')[1].strip()))\n except:\n pass\n elif arg.startswith('research'):\n try:\n points.append(int(arg.split('=')[1].strip()))\n except:\n pass\n idx += 1\n parse_args(args)\n requirements = ''\n if level:\n if points:\n requirements = f' (level {level[0]}, {points[0]} points)'\n else:\n requirements = f' (level {level[0]})'\n ingredients = '• '+'\\n• '.join(ingredients)\n tools = '• '+'\\n• '.join(tools) if tools else ''\n content = f'To craft {item}{requirements}, you need:\\n{ingredients}'\n if tools:\n content = f'{content}\\nAnd these tools:\\n{tools}'\n break\n logging.info(f'Templates in {item}: {\", \".join(template_names)}')\n for table in parsed.tables:\n if 'Ingredients' in table:\n rows = table.string.split('|-')[1:]\n ingredients = [row.strip(' \\t\\n|').split('\\n')[0].strip(' 
\\t\\n|').replace('[[', '').replace(']]', '').split('|')[-1] for row in rows]\n ingredients = [f'{emojis.get(\" \".join(ingredient.split(\" \")[:-1]).lower().replace(\" \", \"_\"), \"\")}{ingredient}' for ingredient in ingredients]\n ingredients = '• '+'\\n• '.join(ingredients)\n content = f'To cook {item}, you need:\\n{ingredients}'\n break\n if content is None:\n await msg.channel.send(**{\n 'content': f'No recipe found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n content += f'\\nSource: {page_url}'\n await msg.channel.send(**{\n 'content': content,\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def wrap_spotify_link(item, text=''):\n\n # generate default text if no text has been given\n if not text:\n name = item['name']\n if item['type'] == 'playlist':\n user = SP.user(item['owner']['id'])['display_name']\n text = f'{name} by {user}'\n elif item['type'] == 'artist':\n text = name\n else:\n artist = item['artists'][0]['name']\n text = f'{name} by {artist}'\n\n link = item['external_urls']['spotify']\n return f'<a href=\"{link}\">{text}</a>'",
"async def get_wikitext(item):\n item = item.strip()\n url = Controller.WIKI_API_REV_URL + item\n response = await Controller.http_get(url)\n try:\n pages = json.loads(response)['query']['pages']\n key = list(pages.keys())[0]\n if key == '-1':\n raise ValueError('Page not found')\n wikitext = pages[key]['revisions'][0]['slots']['main']['*']\n while wikitext.startswith('#REDIRECT'):\n item = re.findall('([^[\\]]+)(?:\\]|$)', wikitext[len('#REDIRECT'):].strip())[0]\n url = Controller.WIKI_API_REV_URL + item\n response = await Controller.http_get(url) \n pages = json.loads(response)['query']['pages']\n key = list(pages.keys())[0]\n wikitext = pages[key]['revisions'][0]['slots']['main']['*']\n return wikitext\n except ValueError as e:\n raise\n except Exception as e:\n logging.info(response)\n logging.error(e)\n return None",
"async def info(self, msg, item=None, *args):\n if not Guard.has_permission(msg, 'embed_links'):\n await msg.channel.send(**{\n 'content': 'I need embed_links permission to answer in this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not item:\n return\n if args:\n item = f'{item} {\" \".join(args)}'\n canonical = await Controller.canonical_title(item)\n if canonical:\n item = canonical\n page_url = Controller.link_from_title(item)\n try:\n wikitext = await Controller.get_wikitext(item)\n except ValueError as e:\n # Means the page is not found\n await msg.channel.send(**{\n 'content': f'No page found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n contents = []\n template_names = []\n for template in WTP.parse(wikitext).templates:\n template_names.append(template.name.strip())\n if self.is_infobox(template.name):\n args = template.arguments\n title = item\n entries = {}\n for arg in args:\n k, v = arg.string.strip(' |\\n').split('=')\n k = k.strip()\n v = v.strip()\n if k.lower() in ['title1', 'name']:\n # Set this as the item name\n title = v\n elif k.lower() in ['image1', 'image'] or not v:\n # Skip images and empty values\n continue\n else:\n entries[k] = v.replace('\\n\\n', '\\n').replace('\\n', '\\n\\t')\n entries = [f'{k} = {v}' for k, v in entries.items()]\n entries = '• '+'\\n• '.join(entries)\n content = f'## **{title}** ##\\nSource: {page_url}\\n{template.name.strip()}\\n{entries}'\n contents.append(content)\n logging.info(f'Templates at {item}: '+', '.join(template_names))\n if not contents:\n await msg.channel.send(**{\n 'content': f'No infobox found for `{item}`',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n await msg.channel.send(**{\n 'content': '\\n===\\n'.join(contents),\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # Clean up template\n template = pywikibot.Page(page.site, template,\n ns=10).title(withNamespace=False)\n # We found the template we were looking for\n if template in self.templateTitles:\n for field, value in fielddict.items():\n field = field.strip()\n value = value.strip()\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(\n u'A claim for %s already exists. Skipping'\n % claim.getID())\n # TODO FIXME: This is a very crude way of dupe\n # checking\n else:\n if claim.getType() == 'wikibase-item':\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))\n continue\n elif claim.getType() == 'string':\n claim.setTarget(value.strip())\n else:\n pywikibot.output(\"%s is not a supported datatype.\" % claim.getType())\n continue\n\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget()))\n item.addClaim(claim)\n # A generator might yield pages from multiple sites\n source = self.getSource(page.site)\n if source:\n claim.addSource(source, bot=True)",
"def procesPage(self, page):\n item = pywikibot.ItemPage.fromPage(page)\n pywikibot.output('Processing %s' % page)\n if not item.exists():\n pywikibot.output('%s doesn\\'t have a wikidata item :(' % page)\n #TODO FIXME: We should provide an option to create the page\n else:\n pagetext = page.get()\n templates = pywikibot.extract_templates_and_params(pagetext)\n for (template, fielddict) in templates:\n # We found the template we were looking for\n if template.replace(u'_', u' ')==self.templateTitle:\n for field, value in fielddict.items():\n # This field contains something useful for us\n if field in self.fields:\n # Check if the property isn't already set\n claim = pywikibot.Claim(self.repo, self.fields[field])\n if claim.getID() in item.get().get('claims'):\n pywikibot.output(u'A claim for %s already exists. Skipping' % (claim.getID(),))\n #TODO FIXME: This is a very crude way of dupe checking\n else:\n # Try to extract a valid page\n match = re.search(pywikibot.link_regex, value)\n if match:\n try:\n link = pywikibot.Link(match.group(1))\n linkedPage = pywikibot.Page(link)\n if linkedPage.isRedirectPage():\n linkedPage = linkedPage.getRedirectTarget()\n linkedItem = pywikibot.ItemPage.fromPage(linkedPage)\n claim.setTarget(linkedItem)\n pywikibot.output('Adding %s --> %s' % (claim.getID(), claim.getTarget().getID()))\n item.addClaim(claim)\n if self.source:\n claim.addSource(self.source, bot=True)\n except pywikibot.exceptions.NoPage:\n pywikibot.output('[[%s]] doesn\\'t exist so I can\\'t link to it' % (linkedItem.title(),))",
"def item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to view the item because\n # the item is private and was created by a different user, send a\n # 403\n elif item.private and current_user != item.user:\n abort(403)\n\n return render_template('item.html', item=item)",
"def get_url(self, item):\n config = {}\n uuid = self.data.get('uuid', None)\n obj = uuidToObject(uuid)\n if uuid and obj:\n config = copy.copy(self.get_config(obj))\n\n url = u'{0}{1}'.format(self.view_url(obj), item.id.value)\n if config.get('modify_url', True):\n url = u'{0}___{1}-{2}'.format(\n url,\n item.title.value,\n item.location.value,\n )\n return url",
"def get_item_url(self, item):\n return self.get_absolute_url(item, 'detail')",
"def linkTo(sharedProxyOrItem):\n if isinstance(sharedProxyOrItem, sharing.SharedProxy):\n userStore = sharing.itemFromProxy(sharedProxyOrItem).store\n else:\n userStore = sharedProxyOrItem.store\n appStore = isAppStore(userStore)\n if appStore:\n # This code-path should be fixed by #2703; PublicWeb is deprecated.\n from xmantissa.publicweb import PublicWeb\n substore = userStore.parent.getItemByID(userStore.idInParent)\n pw = userStore.parent.findUnique(PublicWeb, PublicWeb.application == substore)\n path = [pw.prefixURL.encode('ascii')]\n else:\n for lm in userbase.getLoginMethods(userStore):\n if lm.internal:\n path = ['users', lm.localpart.encode('ascii')]\n break\n else:\n raise RuntimeError(\n \"Shared item is in a user store with no\"\n \" internal username -- can't generate a link.\")\n if (sharedProxyOrItem.shareID == getDefaultShareID(userStore)):\n shareID = sharedProxyOrItem.shareID\n path.append('')\n else:\n shareID = None\n path.append(sharedProxyOrItem.shareID)\n return _ShareURL(shareID, scheme='', netloc='', pathsegs=path)",
"def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)[email protected]\">%(user)s</a>' % {\"user\": user}",
"def getLink(self):",
"def get_link(self, list_item):\n link = list_item.find('a', {'class': 'biz-name'})\n return self.base_url + link.get('href')",
"def HandleHyperLink(self, item):\r\n\r\n if self.IsItemHyperText(item):\r\n event = TreeEvent(wxEVT_TREE_ITEM_HYPERLINK, self.GetId())\r\n event._item = item\r\n self.GetEventHandler().ProcessEvent(event)",
"def _link_items(self):\n pass",
"def work_link(self, obj):\n url = reverse('admin:music_publisher_work_change', args=[obj.work.id])\n link = '<a href=\"{}\">{}</a>'.format(url, obj.work)\n return mark_safe(link)",
"def get_link(self, user_input):\r\n\r\n\t\t# state that you made it this far\r\n\t\tprint(f\"\\nSuccessfully called get_link() with the parameter(s): \\n\\n\\tuser_input -> {user_input}\")\r\n\r\n\t\t# tokenize the user's input, removing words like \"is\", \"the\", \"it\" and so on...\r\n\t\ttokens = self.tokenize(user_input)\r\n\r\n\t\t# categorize the question\r\n\t\tprint(f\"\\nIdentifying question's category...\")\r\n\t\tcategory = self.bayesian_naive_logic(tokens)\r\n\r\n\t\t# start looking for a link that may provide a Answer\r\n\t\tresponse_set = self.storage.get_urls(tokens, category)\r\n\t\tprint(f\"\\nBest Answer found: {response_set}\")\r\n\r\n\t\treturn f\"Here is a link with information closely matching your question: <a href='{response_set}' target='_blank'>{response_set}</a>\"",
"def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")",
"def createLinkFromWikiWord(word, wikiPage): # normalizeWikiWord\r\n return \"\"",
"def share_link(cls, user, link):",
"def share_link(cls, user, link):",
"def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:",
"def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:",
"def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/collection/{}'.format(obj.id)\n )",
"def user(inp):\n user = inp.text.lower().replace(' ', '-')\n return 'http://www.wikidot.com/user:info/' + user",
"def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass",
"async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)"
]
| [
"0.6142394",
"0.61364466",
"0.6084915",
"0.60085547",
"0.59002894",
"0.5884859",
"0.5846574",
"0.5835982",
"0.5824301",
"0.5748178",
"0.5720398",
"0.5717556",
"0.5700181",
"0.56942743",
"0.56676304",
"0.5665238",
"0.5580297",
"0.55604523",
"0.5471592",
"0.54502934",
"0.54502934",
"0.54483175",
"0.5438855",
"0.5438855",
"0.5426983",
"0.5426983",
"0.54208493",
"0.5414079",
"0.54128975",
"0.53771174"
]
| 0.6284063 | 0 |
Replies the user with the crafting recipe of the given item | async def recipe(self, msg, item=None, *args):
if not Guard.has_permission(msg, 'embed_links'):
await msg.channel.send(**{
'content': 'I need embed_links permission to answer in this channel',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
if not item:
return
if args:
item = f'{item} {" ".join(args)}'
canonical = await Controller.canonical_title(item)
if canonical:
item = canonical
page_url = Controller.link_from_title(item)
if item == 'BelAZ':
content = f'To complete the mission "Moving Town" to get BelAZ, you need:\n'
content += '• Nuclear reactor part x3\n'
content += '• Clean water x1,000\n'
content += '• Steel tools x10\n'
content += '• Auto spare parts x500\n'
content += '• Iron pipe x1,000\n'
content += '• Car battery x50\n'
content += '• Scrap x100,000\n'
content += '• Steel x50,000\n'
content += '• Lead x100,000\n'
content += '• Nail x10,000\n'
content += '• Wire x10,000\n'
content += '• Insulating tape x1,000'
await msg.channel.send(**{
'content': content,
'reference': msg.to_reference(),
'mention_author': True,
})
return
try:
wikitext = await Controller.get_wikitext(item)
except ValueError as e:
# Means the page is not found
await msg.channel.send(**{
'content': f'No page found for `{item}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
try:
emojis = {emoji.name.lower(): f'<:{emoji.name}:{emoji.id}> ' for emoji in msg.guild.emojis if emoji.available}
for k, v in list(emojis.items()):
emojis[k+'s'] = v
except:
emojis = {}
parsed = WTP.parse(wikitext)
content = None
template_names = []
for template in parsed.templates:
template_names.append(template.name.strip())
if template.name.strip().lower() == 'recipe':
args = template.arguments
logging.info(args)
ingredients = []
tools = []
level = []
points = []
def parse_args(args):
idx = 0
while idx < len(args):
arg = args[idx].string.strip(' |')
if arg == '':
idx += 1
continue
if '=' not in arg:
amount = args[idx+1].string.strip(' |')
if not amount or amount != '0':
ingredients.append(f'{emojis.get(arg.lower().replace(" ", "_"), "")}{arg.capitalize()} x{amount}')
else:
ingredients.append(f'{emojis.get(arg.lower().replace(" ", "_"), "")}{arg.capitalize()}')
idx += 1
elif arg.startswith('Tool'):
tools.append(arg.split('=', maxsplit=1)[1].strip().capitalize())
elif arg.startswith('input'):
templates = WTP.parse(arg.split('=', maxsplit=1)[1].strip()).templates
if len(templates) > 0:
parse_args(templates[0].arguments)
elif arg.startswith('level'):
try:
level.append(int(arg.split('=')[1].strip()))
except:
pass
elif arg.startswith('research'):
try:
points.append(int(arg.split('=')[1].strip()))
except:
pass
idx += 1
parse_args(args)
requirements = ''
if level:
if points:
requirements = f' (level {level[0]}, {points[0]} points)'
else:
requirements = f' (level {level[0]})'
ingredients = '• '+'\n• '.join(ingredients)
tools = '• '+'\n• '.join(tools) if tools else ''
content = f'To craft {item}{requirements}, you need:\n{ingredients}'
if tools:
content = f'{content}\nAnd these tools:\n{tools}'
break
logging.info(f'Templates in {item}: {", ".join(template_names)}')
for table in parsed.tables:
if 'Ingredients' in table:
rows = table.string.split('|-')[1:]
ingredients = [row.strip(' \t\n|').split('\n')[0].strip(' \t\n|').replace('[[', '').replace(']]', '').split('|')[-1] for row in rows]
ingredients = [f'{emojis.get(" ".join(ingredient.split(" ")[:-1]).lower().replace(" ", "_"), "")}{ingredient}' for ingredient in ingredients]
ingredients = '• '+'\n• '.join(ingredients)
content = f'To cook {item}, you need:\n{ingredients}'
break
if content is None:
await msg.channel.send(**{
'content': f'No recipe found for `{item}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
content += f'\nSource: {page_url}'
await msg.channel.send(**{
'content': content,
'reference': msg.to_reference(),
'mention_author': True,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def func(self):\n caller = self.caller\n\n if not self.args or not self.recipe:\n self.caller.msg(\"Usage: craft <recipe> from <ingredient>, ... [using <tool>,...]\")\n return\n\n ingredients = []\n for ingr_key in self.ingredients:\n if not ingr_key:\n continue\n obj = caller.search(ingr_key, location=self.caller)\n # since ingredients are consumed we need extra check so we don't\n # try to include characters or accounts etc.\n if not obj:\n return\n if (\n not inherits_from(obj, \"evennia.objects.models.ObjectDB\")\n or obj.sessions.all()\n or not obj.access(caller, \"craft\", default=True)\n ):\n # We don't allow to include puppeted objects nor those with the\n # 'negative' permission 'nocraft'.\n caller.msg(\n obj.attributes.get(\n \"crafting_consumable_err_msg\",\n default=f\"{obj.get_display_name(looker=caller)} can't be used for this.\",\n )\n )\n return\n ingredients.append(obj)\n\n tools = []\n for tool_key in self.tools:\n if not tool_key:\n continue\n # tools are not consumed, can also exist in the current room\n obj = caller.search(tool_key)\n if not obj:\n return None\n if not obj.access(caller, \"craft\", default=True):\n caller.msg(\n obj.attributes.get(\n \"crafting_tool_err_msg\",\n default=f\"{obj.get_display_name(looker=caller)} can't be used for this.\",\n )\n )\n return\n tools.append(obj)\n\n # perform craft and make sure result is in inventory\n # (the recipe handles all returns to caller)\n result = craft(caller, self.recipe, *(tools + ingredients))\n if result:\n for obj in result:\n obj.location = caller",
"def craft(self, items):\n\n if items[0].looted and items[1].looted and items[2].looted:\n print(\"Seringue fabriquée ! Vous pouvez endormir le garde.\")\n self.stuff = [\"seringue\"]",
"async def _recipe_info(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n recipe = ' '.join(args)\n out = craft_helpers.print_recipe(ctx.user_object, recipe)\n await ctx.send(out)",
"async def craft(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, recipe = ch.parse_number_and_name(args)\n if number and recipe:\n out = craft_helpers.craft(ctx.user_object, recipe, n=min(number, MAX_PER_ACTION))\n await ctx.send(out)",
"def request_item(date_in, loc_in, item_in, meal_in, requisites):\n secrets = get_secrets()\n url = secrets.get('m_dining_api_main')\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n date += str(date_in)\n url = url + location + date + meal\n url = remove_spaces(url)\n\n if meal_in == '':\n meal_entered = False\n else:\n meal_entered = True\n\n #fetching json\n data = requests.get(url).json()\n\n possible_matches = []\n\n #Loop through meals\n for i in data['menu']['meal']:\n\n #If meal specified, only check specified meal\n if meal_entered and i['name'].upper() != meal_in.upper():\n continue\n #Skip meal if no food items available\n if 'course' not in i:\n continue\n\n #Loop through food items in course\n for j in i['course']:\n for key, value in j.items():\n if key == 'name':\n course_data = j['menuitem']\n meal_name = i['name']\n #Append matches to specified item to possible_matches list\n possible_matches = find_matches(course_data, possible_matches,\n item_in, meal_name, requisites)\n \n #Specified item found\n if possible_matches:\n possible_matches = find_item_formatting(possible_matches)\n text = 'Yes, there is '\n for i in range(len(possible_matches)):\n if len(possible_matches) > 1 and (i == len(possible_matches) - 1):\n text += ' and'\n text += ' ' + possible_matches[i]\n if i != len(possible_matches) - 1:\n text += ','\n\n #Specified item not found\n else:\n text = 'Sorry, that is not available'\n\n\n return {'fulfillmentText': text}",
"def post(self, user):\n data = request.json\n return save_new_recipe(data=data, user=user)",
"def view_recipe(request, recipe, **_kwargs):\n return render(request, \"deployments/disp_recipe.html\", {\"recipe\": recipe})",
"def view_recipes():\n if 'name' in session:\n recipeitem = PLAN.users[session['name']].view_recipes()\n return render_template('recipes.html', recipeitem=recipeitem)\n return redirect(url_for('log_in'))",
"def equip(self):\n item_name = input(\"What item do you want to equip?\\n>\")\n if item_name in self.backpack:\n item = self.backpack[item_name]\n else:\n return \"You don't have this\"\n if item.type in self.equipped:\n self.equipped[item.type] = item\n if item.type == \"Weapon\":\n self.strength = item.strength\n return f\"You have equipped {item.name} on {item.type} item slot\"\n else:\n return \"You can not equip this\"",
"def give(self):\n if self.location.character:\n item = input(f\"What do you want to give to {self.location.character.name}?\\n>\")\n if item in self.backpack:\n if self.location.character.give(item):\n if isinstance(self.location.character, Friend):\n loot = self.location.character.possession\n self.backpack[loot.name] = loot\n self.location.character.treat = None\n self.location.character.possession = None\n del self.backpack[item]\n return f\"{self.location.character.name} accepted your gift, and gave you {loot}\"\n if isinstance(self.location.character, Enemy):\n name = self.location.character.name\n self.location.character = None\n del self.backpack[item]\n return f\"You fend off {name} with {item}\"\n else:\n return f\"It does not accept {item}\"\n else:\n return f\"{self.location.character.name} does not like {item}\"\n else:\n return \"You don't have this\"\n else:\n return \"There is no one here\"",
"def recipe(self):\n return self.__recipe",
"async def item(self, ctx, raid: Raid):\n\n def check_author(m):\n return m.author == ctx.author\n\n if raid:\n # Raid Found, ask user to start entering items\n await ctx.send(RAID_FOUND.format(raid_id=raid.id,\n raid_event_name=raid.event_name,\n raid_date=raid.date))\n item_log = ''\n while True:\n # Wait for item entry: <Character> <DKP> <Item Name>\n try:\n msg = await ctx.bot.wait_for('message', check=check_author, timeout=60)\n except asyncio.TimeoutError:\n break\n\n response = msg.content.replace(\"<\", \"\").replace(\">\", \"\")\n\n if \"done\" in response.lower():\n break\n\n if \"cancel\" in response.lower():\n return None\n\n parts = response.split()\n if len(parts) < 3:\n await ctx.send(f'The following response `{msg.content}` was not valid. Please try again.')\n continue\n\n character_part = parts[0]\n item_value_part = parts[1]\n item_name_part = parts[2:]\n\n # Validate the character\n character = [c for c in self.characters if c.name.lower() == character_part.lower()]\n if not character:\n await ctx.send(f'The following character `{character_part}` was not valid. Please try again.')\n continue\n character = character[0]\n\n # Validate the item value\n if not item_value_part.isnumeric():\n await ctx.send(f'The following dkp of `{item_value_part}` is not a number. Please try again.')\n continue\n item_value = int(item_value_part)\n\n # TODO validate item_name\n item_name = ' '.join(item_name_part).capitalize()\n\n raid_item = eqdkp.create_raid_item(item_date=raid.date,\n item_name=item_name,\n item_raid_id=raid.id,\n item_value=item_value,\n item_buyers=[character.id])\n if raid_item:\n await ctx.send(\n f\"`{item_name} was successfully charged to {character.name} for {item_value} dkp. \"\n f\"Continue with the next item, or type done.`\")\n item_log += f\"> {item_name.ljust(30)}{character.name.ljust(20)}{str(item_value).rjust(5)} DKP\\n\"\n\n else:\n await ctx.send(f\"`ERROR: {item_name} failed to get entered. Please try again`\")\n\n # Find and edit the raid log in #dkp-entry-log channel\n if len(item_log) > 0:\n async with ctx.typing():\n channel = ctx.bot.dkp_entry_log_channel\n messages = await channel.history(limit=50).flatten()\n messages = [m for m in messages if f\"Raid Entry Log [{raid.id}]\" in m.content]\n if messages:\n message = messages[0]\n items_purchased = f\"\"\"\\n\\n* Items Purchased\\n{item_log}```\"\"\"\n content = message.content[:-3] + items_purchased\n await message.edit(content=content)\n return await ctx.send(f'All done! #{channel.name} has been edited.')\n else:\n return await ctx.send(\n f\"`ERROR: I wasn't able to edit #{channel.name}. Please do so manually.`\")",
"def recipe(id):\n\n selected_recipe = mongo.db.recipes.find_one({'_id': ObjectId(id)})\n\n # Using create list function to display these sections easier\n display_method = create_list(selected_recipe[\"method\"])\n display_ingredients = create_list(selected_recipe[\"ingredients\"])\n display_equipment = create_list(selected_recipe[\"equipment\"])\n\n show_ad = make_comparison(ad_equipment, display_equipment)\n\n return render_template('view_recipe.html', recipe=selected_recipe,\n title='Recipe', display_method=display_method,\n ad_equipment=ad_equipment,\n display_ingredients=display_ingredients,\n display_equipment=display_equipment,\n show_ad=show_ad)",
"def get_recipe(self, _id):\n raise NotImplementedError()",
"def item_from_feed(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)",
"def has_item(item: Item):\n async def _wrapper(ctx):\n if not (res := 0 < await ctx.db.get(\"items\", ctx.author, item.id)):\n name = (f\"an \" if any(item.name.startswith(vowel) for vowel in \"aeiou\") else \"a \") + f\"**{item}**\"\n await ctx.send(f\"You need to own {name} in order to use this command.\" + (\n f\" You can go buy one from the shop! (`{ctx.clean_prefix}shop`)\" if item.buyable else \"\"\n ))\n return res\n\n return discord.ext.commands.check(_wrapper)",
"def LookOn(play, item):\r\n\tspk(\"You start perusing the items on %s\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookoner(play, item)\r\n\telse:\r\n\t\tspk(\"Nothing\")",
"def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"",
"def new_recipe(self):\n os.system(\"clear\")\n self.name = input(\"Recipe Name: \")\n self.ingredients = None\n self.steps=None\n self.description = None\n self.tags = {}\n self.edit_ingredients()\n self.edit_steps()\n if _yes_no_select(\"Would you like to add a description?\"):\n self.description = input(\"Description: \")\n self.edit_tags()\n while not self.check():\n continue",
"def lookiner(item):\r\n\tfrom menus import menu\r\n\tl = []\r\n\tfor n in item.items:\r\n\t\tl.append(n.name)\r\n\t\ti = menu(item.items[0].name, l)\r\n\tif i != None:\r\n\t\tf = item.items[i]\r\n\t\tf.add_options(item)\r\n\t\tr = f.option_list()\r\n\t\to = menu(r[0], r)\r\n\t\tif o == None:\r\n\t\t\tspk(\"You stop looking at the items in %s\" % item.name)\r\n\t\telif r[o] == 'take':\r\n\t\t\tspk(\"you remove %s from %s\" % (f.name, item.name))\r\n\t\t\ttake(item, f)\r\n\t\telse:\r\n\t\t\teval(f.options.get(r[o]))(play, f)\r\n\telse:\r\n\t\tspk(\"You stop looking at the items in %s\" % item.name)",
"def generate_quest(self):\n\n if odds(3):\n\n quest_items = add_dicts_together(items[\"master\"], items[self.p.square.square_type])\n quest_item = random.choice(list(quest_items.keys()))\n\n i = Item(quest_item, 0, **quest_items[quest_item])\n self.inventory.append(i)\n\n quantity = {'super rare': '1',\n 'rare': '2',\n 'uncommon': '3',\n 'common': '6',\n 'super common': '15'}\n q = quantity[i.rarity]\n\n self.quest = i, int(q), f\"{self.p.name}, if you bring \" \\\n f\"me {q} {i.plural if int(q) > 1 else remove_little_words(i.name)}, \" \\\n f\"I will teach you a valuable skill.\"\n return\n elif odds(5):\n mobs = []\n for biome, building in buildings.items():\n for b, attributes in building.items():\n if attributes.get('mobs'):\n for k in attributes['mobs'].keys():\n mobs.append(k)\n for biome, mob in wild_mobs.items():\n for k in mob.keys():\n mobs.append(k)\n target = f\"{mobs[random.randint(0, len(mobs)-1)]} named {names[random.randint(0, len(names)-1)]}\"\n print(f\"Well, we'll keep this off the record, but I can arrange for some money to find its way \"\n f\"into your account if you make {colored(target, 'yellow')} disappear, if you know what I mean...\")\n self.p.hit_list.append(target)\n return False\n\n else:\n return None",
"def do_eat(self, arg):\r\n itemToEat = arg.lower()\r\n\r\n if itemToEat == '':\r\n print('Eat what? Type \"inventory\" or \"inv\" to see whats in your inventory.')\r\n return\r\n\r\n cantEat = False\r\n\r\n for item in getAllItemsMatchingDesc(itemToEat, inventory):\r\n if worldItems[item].get(EDIBLE, False) == False:\r\n cantEat = True\r\n continue # there may be other items named this that you can eat, so we continue checking\r\n # NOTE - If you wanted to implement hunger levels, here is where\r\n # you would add code that changes the player's hunger level.\r\n print('You eat %s may your bowls forever question your terrible choices.' % (worldItems[item][SHORTDESC]))\r\n inventory.remove(item)\r\n return\r\n\r\n if cantEat:\r\n print('I dont think the \"%s\" would like you to do that...' % (worldItems[item][SHORTDESC]))\r\n else:\r\n print('You do not have \"%s\". Type \"inventory\" or \"inv\" to see what in your inventory.' % (itemToEat))",
"def set_recipe(self, recipe):\n self.__recipe.append(recipe)\n return 'Recipe created successfully'",
"def fridge_recipes(request):\n\n user = request.user\n fridge = Fridge.objects.get_or_create(user=user)[0]\n fridge_ingredients = fridge.ingredients.all()\n ingredient_names = [ingredient.name for ingredient in fridge_ingredients]\n recipes = recipes_containing(ingredient_names, fridge=fridge)\n\n content = {\n 'ingredients': ingredient_names,\n 'recipes': recipes,\n }\n\n return render(request, 'fridge/fridge_recipes.html', content)",
"def choose_item():\n print_items()\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == 'blueprint':\n blueprint = ViewMap()\n blueprint.print_map()\n print(\"Type 'back' to go to main menu.\")\n else:\n print(\"Type 'back' to go to main menu.\")\n print(\"You can view map by typing in 'blueprint'\")\n else:\n print(\"Type 'back' to go to main menu.\")",
"def food_item(request, food_id):\n\n food = get_object_or_404(Nutrition, pk=food_id)\n\n context = {\n 'food': food,\n }\n\n return render(request, 'nutrition/food.html', context)",
"def item_from_party(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)",
"def add_recipe():\r\n if \"user\" in session:\r\n cuisine, course, allergens = Helpers.dropdowns(coll_cuisines, coll_courses, coll_allergens)\r\n return render_template(\r\n \"addrecipe.html\",\r\n cuisine=sorted(cuisine),\r\n course=course,\r\n allergens=allergens)\r\n else:\r\n flash(\"You must be logged in to view this page!\")\r\n return redirect(url_for(\"users.login\"))",
"def recipe(request, recipe_id):\n recipe = Recipe.objects.get(id=recipe_id)\n\n if recipe.level == 'ES':\n recipeLevel = 'Łatwe'\n elif recipe.level == 'MM':\n recipeLevel = 'Średnie'\n elif recipe.level == 'HR':\n recipeLevel = 'Trudne'\n \n recipeOwner = False\n\n if recipe.owner == request.user:\n recipeOwner = True\n\n context = {'recipe': recipe, 'recipeLevel': recipeLevel, 'recipeOwner': recipeOwner}\n return render(request, 'foddys/recipe.html', context)",
"def edit_recipe(request, recipe, **_kwargs):\n return edit_view(request, _(\"Recipe\"), RecipeForm, recipe)"
]
| [
"0.6736054",
"0.61099",
"0.6082913",
"0.6016578",
"0.60134727",
"0.5981742",
"0.5953637",
"0.59410495",
"0.5934667",
"0.5928849",
"0.59241426",
"0.5899832",
"0.5897507",
"0.57860583",
"0.5768086",
"0.5627919",
"0.56226647",
"0.56204826",
"0.5610315",
"0.5566833",
"0.5565702",
"0.5549409",
"0.5548107",
"0.5541446",
"0.55376077",
"0.5536061",
"0.5532828",
"0.5530602",
"0.5523067",
"0.551967"
]
| 0.6734797 | 1 |
Returns True if the template name is a type of infobox | def is_infobox(self, name):
name = name.strip()
if name.lower().startswith('infobox'):
return True
if name == 'Armors_(NEW)':
return True
if name == 'All_inclusive_infobox_2020':
return True
if name.lower() == 'item':
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_template(self):\n\t\treturn bool(call_sdk_function('PrlFoundVmInfo_IsTemplate', self.handle))",
"def is_template(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTemplate', self.handle))",
"def istemplate(self, t):\n if isinstance(t, basestring):\n return t in self.template_types\n if isinstance(t, Sequence):\n return self.istemplate(t[0])\n return False",
"def _is_name_type(self, type_id):\n return type_id == self.name_type",
"def is_templated(self):\n for table in self.parent.tables:\n if isinstance(table, SettingTable):\n for row in table.rows:\n if row[0].lower() == \"test template\":\n return True\n return False",
"def is_type(self, type_name):\n\n return type_name in self._symtab",
"def _check_template_name(self, template):\n filename = os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), template, '__init__.ini')\n if self._check_file_exists(filename) and self._check_access(filename, os.R_OK):\n return True\n else:\n return False",
"def ismarker(typename, tree):\n if type(tree) is not With or len(tree.items) != 1:\n return False\n ctxmanager = tree.items[0].context_expr\n return type(ctxmanager) is Name and ctxmanager.id == typename",
"def check_tilename(self, tilename):\n\n check = False\n self.decode_tilename(tilename)\n check = True\n return check",
"def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))",
"def _has_template(self, target):\n\t\tif target.has_key('use'):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn None",
"def exists(self):\n try:\n select_template(self.get_paths())\n return True\n except TemplateDoesNotExist:\n return False",
"def has_template(page_text: str) -> bool:\n\n\tpattern = '<noinclude>.*{{documentation}}.*</noinclude>'\n\tif re.search(pattern, page_text, re.DOTALL | re.IGNORECASE):\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_enabled_type(self):\r\n registry = queryUtility(IRegistry) \r\n if registry is None: \r\n # Don't show if the registry is not found\r\n return False\r\n settings = registry.forInterface(IIPnextViewletBlogSettings, \r\n check=False) \r\n _types = getattr(settings, 'allowed_types', '')\r\n this_type = self.context.Type()\r\n \r\n return this_type in _types",
"def getType(self):\n return \"Business Template\"",
"def __match_info_with_type(self, info: SheetInfo):\n if info.info_type is str:\n return QLineEdit()\n elif info.info_type is bool:\n return QCheckBox()\n else:\n raise TypeError(\"There is no matched GUI type for \" + str(info.info_type))",
"def has_notification_template(self):\n return self._has_notification_template",
"def is_type(self, typ):\n return typ == self.__class__.__name__",
"def is_typespace(schema_obj):\n\n return isinstance(schema_obj, schema.Typespace)",
"def test_for_template(self):\n self.assertTemplateUsed(self.response, 'my_info_template.html')",
"def exists_type(self, type):\n for i in range(1, self.grid_size - 1):\n for j in range(1, self.grid_size - 1):\n obj = self.grid.get(i, j)\n if obj and obj.type == type:\n return True\n return False",
"def getIsType(self):\n return _libsbml.MultiCompartmentPlugin_getIsType(self)",
"def is_type(obj: Any) -> bool:\n return type(obj).__name__ == \"type\"",
"def template_check(value):\n if isinstance(value, str):\n return value.lower() == \"true\"\n return value",
"def is_templated(self) -> bool:\n # We check two things:\n # * Source slice not empty: If it's empty, this means it doesn't appear\n # in the source, e.g. because it is new code generated by a lint fix.\n # Return False for these.\n # * It's not a literal slice. If it's a literal and has size then it's\n # not templated.\n assert self.pos_marker\n return (\n self.pos_marker.source_slice.start != self.pos_marker.source_slice.stop\n and not self.pos_marker.is_literal()\n )",
"def _valid_typable_object_with_name(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return (ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys() and\n _valid_object_with_name(ui_object))\n else:\n assert False, 'Wrong Platform'",
"def checkObjectInNameSpace(objectName):\n if objectName is None or not isinstance(objectName, basestring) or objectName == u\"\": return False\n if objectName in globals(): return True\n return objectName in dir(builtins)",
"def exclude_info_types(self) -> Optional[bool]:\n return pulumi.get(self, \"exclude_info_types\")",
"def has_name(self, name):\n\t\treturn name in self.classes",
"def has_name(self, name):\n\t\t\treturn name in self.classes"
]
| [
"0.719086",
"0.6656455",
"0.6562287",
"0.5975037",
"0.5917431",
"0.5775788",
"0.5742233",
"0.5733611",
"0.56911385",
"0.5621843",
"0.5567178",
"0.55560607",
"0.5515181",
"0.5500605",
"0.5496725",
"0.541349",
"0.5398289",
"0.5381162",
"0.5378755",
"0.53602856",
"0.535799",
"0.53570503",
"0.53382987",
"0.5337079",
"0.5335587",
"0.53243506",
"0.5299622",
"0.5289084",
"0.5286566",
"0.5282832"
]
| 0.7511678 | 0 |
Replies the user with the information from the infobox of the specified item | async def info(self, msg, item=None, *args):
if not Guard.has_permission(msg, 'embed_links'):
await msg.channel.send(**{
'content': 'I need embed_links permission to answer in this channel',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
if not item:
return
if args:
item = f'{item} {" ".join(args)}'
canonical = await Controller.canonical_title(item)
if canonical:
item = canonical
page_url = Controller.link_from_title(item)
try:
wikitext = await Controller.get_wikitext(item)
except ValueError as e:
# Means the page is not found
await msg.channel.send(**{
'content': f'No page found for `{item}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
contents = []
template_names = []
for template in WTP.parse(wikitext).templates:
template_names.append(template.name.strip())
if self.is_infobox(template.name):
args = template.arguments
title = item
entries = {}
for arg in args:
k, v = arg.string.strip(' |\n').split('=')
k = k.strip()
v = v.strip()
if k.lower() in ['title1', 'name']:
# Set this as the item name
title = v
elif k.lower() in ['image1', 'image'] or not v:
# Skip images and empty values
continue
else:
entries[k] = v.replace('\n\n', '\n').replace('\n', '\n\t')
entries = [f'{k} = {v}' for k, v in entries.items()]
entries = '• '+'\n• '.join(entries)
content = f'## **{title}** ##\nSource: {page_url}\n{template.name.strip()}\n{entries}'
contents.append(content)
logging.info(f'Templates at {item}: '+', '.join(template_names))
if not contents:
await msg.channel.send(**{
'content': f'No infobox found for `{item}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
await msg.channel.send(**{
'content': '\n===\n'.join(contents),
'reference': msg.to_reference(),
'mention_author': True,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)",
"def OnInfoEdit(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n item = self.items[selections[0]]\r\n if self.gInfoBox.IsModified():\r\n self.data.setInfo(item,self.gInfoBox.GetValue())",
"def getInfo(self,item):\r\n return ''",
"def iteminfo():\n itemcode = input(\"Enter item code: \")\n if itemcode in FULLINVENTORY:\n printdict = FULLINVENTORY[itemcode]\n for key, value in printdict.items():\n print(\"{}:{}\".format(key, value))\n else:\n print(\"Item not found in inventory\")",
"async def info(self, ctx, name: str):\n name = name.lower()\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if name not in self.db[server.id]:\n await self.bot.say(\"Please make sure that the name is spelled correctly and \"\n \"that you can find it in [p]box list\")\n return\n box = list(self.db[server.id][name][\"content\"].keys())\n values = list(self.db[server.id][name][\"content\"].values())\n value = sum(values)\n for x in range(len(values)):\n values[x] = values[x]/value\n box[x] = \" {:.2%} chance of getting \".format(values[x]) + box[x]\n msg = \"You can get the following items from the box:\\n\"\n msg += \"\\n\".join(box)\n for page in pagify(msg):\n await self.bot.say(page)",
"def info(self, membership, callback=None):",
"def show(self, item_id):\n pass",
"def OnSelect(self,event):\r\n index = event.GetSelection()\r\n item = self.items[index]\r\n self.data.select(item)\r\n if self.gInfoBox:\r\n self.gInfoBox.DiscardEdits()\r\n self.gInfoBox.SetValue(self.data.getInfo(item))",
"def manage_info():",
"def info_cmd(args):\n livebox_info()",
"async def info(self, ctx):\n if ctx.guild is not None:\n await ctx.reply(\"This command can only be used in DMs, because of privacy reasons.\")\n raise commands.CommandError(\"Invoker not in DMs.\")\n\n if not is_linked(ctx.author.id):\n await ctx.reply(f\"You don't have a Spotify account linked. Please link one using \"\n f\"`{self.bot_config['prefix']}link`.\")\n raise commands.CommandError(\"User has no spotify account linked.\")\n\n sp = init_spotify(ctx.author.id)\n result = sp.me()\n msg_embed = Embed()\n msg_embed.title = \"Linked Spotify account\"\n msg_embed.url = result['external_urls'].get('spotify', None)\n if len(result['images']) > 0:\n msg_embed.set_image(url=result['images'][0]['url'])\n msg_embed.add_field(name=\"Display name\", value=result['display_name'])\n msg_embed.add_field(name=\"Subscription type\", value=result.get('product', 'free'))\n if result.get('product', None) != \"premium\":\n msg_embed.add_field(name=\"Warning!\",\n value=\"Only accounts with Spotify Premium can use this bot!\",\n inline=False)\n await ctx.reply(embed=msg_embed)",
"def get_item_detail(item_id):\n pass",
"def api_item_details(item_id):\n if request.method == 'GET':\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return jsonify(item.Item.to_json())\n # TODO - Add a POST method + HTTP Auth to allow a RESTful item modification",
"def show_item_details(item_id):\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return render_template('item_details.html', item=item, login_session=login_session)",
"def quick_info_retrieve_view(request):\n kind_of_ballot_item = request.GET.get('kind_of_ballot_item', \"\")\n ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', \"\")\n return quick_info_retrieve_for_api(kind_of_ballot_item=kind_of_ballot_item,\n ballot_item_we_vote_id=ballot_item_we_vote_id)",
"def info(self, id):",
"def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')",
"def item_info():\n item_code = get_input(\"Enter item code: \")\n if item_code in FULL_INVENTORY:\n print_dict = FULL_INVENTORY[item_code]\n output = \"\"\n for key, value in print_dict.items():\n output += (\"{}:{}{}\".format(key, value, \"\\n\"))\n else:\n output = \"Item not found in inventory\"\n print(output)\n return output",
"def item(self, item_name):\n\tself.log.info('Not implemented yet... Sorry!')\n\tpass",
"def get_info(self, info):\r\n pass",
"def info_msgbox(self, message, title):\n return os.system(\"zenity --info --text=\\\"%s\\\" --title=\\\"%s\\\"\" % (message, title))",
"def get_item_info(self, item_id):\n request_name = \"get_shop_info\"\n\n items = self.make_request(request_name, url_id=item_id)\n try:\n item = items[0]\n item_dict = dict()\n item_dict[\"id\"] = item[\"@id\"].encode('utf-8')\n item_dict[\"name\"] = item[\"label\"].encode('utf-8')\n item_dict[\"shelf\"] = item[\"shelf\"].encode('utf-8')\n item_dict[\"slot\"] = item[\"slot\"].encode('utf-8')\n item_dict[\"quantity\"] = item[\"quantity\"]\n return item_dict\n except Exception as e:\n print(\"Encountered exception while getting item\", item_id, \"\\n\", str(e))\n return None",
"def showinfo(self, msg):\n tkinter.messagebox.showinfo('Information', msg)",
"def received_information(update: Update, context: CallbackContext) -> int:\n text = update.message.text\n for a in user_d:\n category = user_d[a]\n if category == 'Public_Key' and len(text) == 58:\n assert len(text) == 58, update.message.reply_text(\"The address is invalid address\")\n user_d[category] = text\n elif category == 'Quantity' and type(int(text) == int):\n user_d[category] = int(text)\n elif category == 'Secret_Key' and len(text) > 58:\n user_d[category] = text\n else:\n user_d[category] = text\n user_data = context.user_data\n user_data[category] = user_d[category]\n\n update.message.reply_text(\n \"I got this from you:\\n\"\n f\"{facts_to_str(user_d)}\",\n reply_markup=markup_r,\n )\n user_d.clear()\n\n return CHOOSING",
"def show_info(title, message):\n\n pass",
"def showInfo(parent,message,title=_('Information')):\r\n return askStyled(parent,message,title,wx.OK|wx.ICON_INFORMATION)",
"def information(self, bot, update):\n update.message.reply_markdown(\"Nun werden einige Informationen zu Alfred angezeigt.\",\n reply_markup=self.option_markup)",
"def on_info_click(self, event):\n def on_close(event, wind):\n wind.Close()\n wind.Destroy()\n event.Skip()\n wind = wx.PopupTransientWindow(self, wx.RAISED_BORDER)\n if self.auto_save.GetValue():\n info = \"'auto-save' is currently selected. Temperature bounds will be saved when you click 'next' or 'back'.\"\n else:\n info = \"'auto-save' is not selected. Temperature bounds will only be saved when you click 'save'.\"\n text = wx.StaticText(wind, -1, info)\n box = wx.StaticBox(wind, -1, 'Info:')\n boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n boxSizer.Add(text, 5, wx.ALL | wx.CENTER)\n exit_btn = wx.Button(wind, wx.ID_EXIT, 'Close')\n wind.Bind(wx.EVT_BUTTON, lambda evt: on_close(evt, wind), exit_btn)\n boxSizer.Add(exit_btn, 5, wx.ALL | wx.CENTER)\n wind.SetSizer(boxSizer)\n wind.Layout()\n wind.Popup()",
"def info_dialog(self, title, message):\n return self._impl.info_dialog(title, message)",
"def received_information(update: Update, context: CallbackContext) -> int:\r\n user_data = context.user_data\r\n text = update.message.text\r\n category = user_data['choice']\r\n user_data[category] = text\r\n del user_data['choice']\r\n\r\n update.message.reply_text(\r\n \"Genial, tu pedido está avanzando de esta manera:\"\r\n f\"{facts_to_str(user_data)}Puedes agregar algún comentario o cambio en tu orden en Comentarios...\",\r\n reply_markup=markup,\r\n )\r\n\r\n return CHOOSING"
]
| [
"0.6939309",
"0.66859245",
"0.6623628",
"0.6331634",
"0.63185996",
"0.6283439",
"0.6266595",
"0.6234912",
"0.61936915",
"0.6185648",
"0.6137834",
"0.61105543",
"0.6095368",
"0.6089556",
"0.60420245",
"0.60150623",
"0.59859926",
"0.5933654",
"0.59217507",
"0.59145945",
"0.58775806",
"0.5873832",
"0.58465147",
"0.58377796",
"0.5811968",
"0.5755401",
"0.5747379",
"0.5745538",
"0.57023",
"0.5700658"
]
| 0.6815587 | 1 |
Fetch and cache the trading table from wiki | async def get_trading_table(self):
if self.trading_table is None:
self.trading_table = {}
wikitext = await Controller.get_wikitext('Trading')
for match in re.finditer(r"===='''([^']+)'''====\n({\|[^\n]*\n(?:[^\n]*\n)+?\|})", wikitext):
place = match.group(1)
trade_list = {'into':{}, 'from':{}}
for row in match.group(2).strip().split('|-'):
if len(row) < 5:
continue
trade = re.search(r'\|([0-9,.]+)\|\| \[\[(?:[^|\]]+\|)?([^\]]+)\]\]\|\|→\n\|align\=right\|([0-9,.]+)\|\| \[\[(?:[^|\]]+\|)?([^\]]+)\]\]', row)
if not trade:
trade = re.search(r'\| ?([0-9,.]+) \[\[(?:[^|\]]+\|)?([^\]]+)\]\]\|\| ?([0-9,.]+) \[\[(?:[^|\]]+\|)?([^\]]+)\]\]', row)
if not trade:
logging.warn(f'No trade row in `{row}`')
continue
from_amt = int(trade.group(1).replace(',', ''))
from_itm = trade.group(2).lower()
to_amt = int(trade.group(3).replace(',', ''))
to_itm = trade.group(4).lower()
if from_itm not in trade_list['from']:
trade_list['from'][from_itm] = []
if to_itm not in trade_list['into']:
trade_list['into'][to_itm] = []
trade_list['from'][from_itm].append((to_itm, from_amt, to_amt))
trade_list['into'][to_itm].append((from_itm, to_amt, from_amt))
if '(' in place:
# Gorenichi (Kiev), Magnitogorsk (trader), Magnitogorsk (fitter)
if place[0] == 'G':
place = 'Kiev'
self.trading_table[place.lower()] = trade_list
return self.trading_table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sp500_stocks_wiki(url=None):\n\n website_url = requests.get(url)\n soup = BeautifulSoup(website_url.text, 'lxml')\n my_table = soup.find('table', {'class': 'wikitable sortable'})\n my_table\n\n table_rows = my_table.find_all('tr')\n\n data = []\n for row in table_rows:\n data.append([t.text.strip() for t in row.find_all('td')])\n\n df = pd.DataFrame(data[1:], columns=['Ticker', 'Security', 'SEC_Filings',\n 'GICS', 'GICS_Sub', 'HQ',\n 'Date_First_Added', 'CIK', 'Founded'])\n\n return df",
"def load_data(session, tbls):\n ############\n # Data loading\n ############\n # trade base and trade attributes\n tbls.tradeAttributeTbl.load_csv(\n \"s3://data.atoti.io/notebooks/irrbb/TradeAttributes.csv\"\n )\n\n # Sensitivities - IRDelta, SIDelta, IRVega, Cashflow\n # loading sensitivities data will also trigger data loading into TradeBase table\n loadSensitivity(\n tbls.tradeBaseTbl,\n tbls.irDeltaTbl,\n \"https://data.atoti.io/notebooks/irrbb/IRDelta.csv\",\n \"DeltaSensitivities\",\n )\n loadSensitivity(\n tbls.tradeBaseTbl,\n tbls.siDeltaTbl,\n \"https://data.atoti.io/notebooks/irrbb/SIDelta.csv\",\n \"SIDeltaSensitivities\",\n )\n loadSensitivity(\n tbls.tradeBaseTbl,\n tbls.irVegaTbl,\n \"https://data.atoti.io/notebooks/irrbb/IRVega.csv\",\n \"VegaSensitivities\",\n )\n loadSensitivity(\n tbls.tradeBaseTbl,\n tbls.nmrCashFlowTbl,\n \"https://data.atoti.io/notebooks/irrbb/NMRCashFlow.csv\",\n \"CashFlowValues\",\n )\n\n tbls.portfolioTbl.load_csv(\"s3://data.atoti.io/notebooks/irrbb/BookParentChild.csv\")\n\n # historical risk factors\n histRFDF = process_historical_rf(\n \"https://data.atoti.io/notebooks/irrbb/HistoricalRiskFactor.csv\"\n )\n tbls.historicalRFTbl.load_pandas(histRFDF[tbls.historicalRFTbl.columns])\n tbls.historicalDateTbl.load_csv(\n \"s3://data.atoti.io/notebooks/irrbb/HistoricalDates.csv\"\n )\n\n # analysis hierarchy\n tbls.tenorsTbl.load_csv(\"s3://data.atoti.io/notebooks/irrbb/Tenors.csv\")",
"def fetchHistory(self, token):\n history = self.loadHistory(token)\n if len(history):\n startStamp = history[-1][\"timestamp\"] + 1000 + random.random()*1000 # Add some random number of seconds\n startDateStr = time.strftime(\"%Y%m%d\", time.gmtime(int(startStamp)))\n else:\n startDateStr = \"20130428\" # Date of the first bitcoin valuation ?\n dateStr = time.strftime(\"%Y%m%d\")\n uri = self.historyTemplate % (token, startDateStr, dateStr)\n print(\"Fetching history\")\n html = BeautifulSoup(urlrequest.urlopen(uri).read().decode(), \"html.parser\")\n print(\"parsing html\")\n dataRows = html.find(\"div\", {\"id\": \"historical-data\"}).find(\"table\", {\"id\", \"table\"}).find(\"tbody\").find_all(\"tr\", {\"class\": \"text-right\"})\n headers = [\"date.string\", \"open\", \"high\", \"low\", \"close\", \"volume\", \"market.cap\"]\n dataPts = []\n print(\"translating data\")\n for row in dataRows:\n rowObj = {}\n for i, td in enumerate(row.find_all(\"td\")):\n if i == 0:\n try:\n rowObj[headers[i]] = td.get_text()\n rowObj[\"timestamp\"] = helpers.stamp2dayStamp(datetime.datetime.strptime(td.get_text(), \"%b %d, %Y\").timestamp())\n except Exception:\n print(\"failed to parse float from `%s`\" % td.get_text())\n rowObj[headers[i]] = \"Dec 31, 1999\"\n elif i < 5:\n try:\n rowObj[headers[i]] = float(td.get_text())\n except Exception:\n print(\"failed to parse float from `%s`\" % td.get_text())\n rowObj[headers[i]] = 0.0\n else:\n try:\n rowObj[headers[i]] = int(td.get_text().replace(\",\", \"\"))\n except Exception:\n print(\"failed to parse integer from `%s`\" % td.get_text())\n rowObj[headers[i]] = 0\n dataPts.append(rowObj)\n for pt in sorted(dataPts, key=lambda p: p[\"timestamp\"]):\n if len(history) == 0 or pt[\"timestamp\"] > history[-1][\"timestamp\"]:\n history.append(pt)\n self.saveHistory(token, history)\n return history",
"def load_table_cache(tracer_id, model, table_name, verbose=True):\n filepath = paths.tracer_cache_filepath(tracer_id, model, table_name=table_name)\n printv(f'Loading table from cache: {filepath}', verbose)\n return pd.read_pickle(filepath)",
"def table_of_contents(self):\r\n toc_url = self.book_url + 'toc.xml'\r\n\r\n # cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)\r\n # course modules have a very short lifespan and are constantly being created and torn down.\r\n # Since this module in the __init__() method does a synchronous call to AWS to get the TOC\r\n # this is causing a big performance problem. So let's be a bit smarter about this and cache\r\n # each fetch and store in-mem for 10 minutes.\r\n # NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and\r\n # rewrite to use the traditional Django in-memory cache.\r\n try:\r\n # see if we already fetched this\r\n if toc_url in _cached_toc:\r\n (table_of_contents, timestamp) = _cached_toc[toc_url]\r\n age = datetime.now(UTC) - timestamp\r\n # expire every 10 minutes\r\n if age.seconds < 600:\r\n return table_of_contents\r\n except Exception as err:\r\n pass\r\n\r\n # Get the table of contents from S3\r\n log.info(\"Retrieving textbook table of contents from %s\" % toc_url)\r\n try:\r\n r = requests.get(toc_url)\r\n except Exception as err:\r\n msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)\r\n log.error(msg)\r\n raise Exception(msg)\r\n\r\n # TOC is XML. Parse it\r\n try:\r\n table_of_contents = etree.fromstring(r.text)\r\n except Exception as err:\r\n msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)\r\n log.error(msg)\r\n raise Exception(msg)\r\n\r\n return table_of_contents",
"def load_stats_table(url):\n\n req = requests.get(url)\n soup_page = BeautifulSoup(req.text, 'html.parser')\n stats_table = soup_page.table\n global_variables.page_number += 1\n return stats_table",
"def get_history_with_cache(conn, symbol: str, year: str):\n if not check_table_exist(conn, f\"History{year}\"):\n gen_table_for_history(conn, year)\n # load db as pandas Dataframe\n df = load_table_history(conn, year)\n # if not cached\n if not df.Symbol.isin([symbol]).any():\n try:\n response = get_history_for(symbol=symbol)\n for k, v in response[\"Time Series (Daily)\"].items():\n if k.startswith(year):\n insert_history(conn,\n symbol=symbol,\n date=k,\n open=v[\"1. open\"],\n high=v[\"2. high\"],\n low=v[\"3. low\"],\n close=v[\"4. close\"],\n volume=v[\"6. volume\"],\n adjusted=v[\"5. adjusted close\"])\n # when API call limit (5 per minute) reached\n except KeyError:\n print(\n f\"History({symbol}, {year}): API call limit reached.\",\n \"Try again later.\"\n )\n # reload database\n df = load_table_history(conn, year)\n # return price history for the given symbol & year\n return df.query(f\"Symbol == '{symbol}'\")",
"def fetch_data(self):",
"def _fetch(self, output_type='xml'):\n\n # authenticate\n self._auth()\n\n # get the table\n response = self._do('GET', self.URLS['adp'])\n\n # load results\n self._results = self._parse_doc(response.text)",
"def gethistory(ticker):\n link = 'http://ichart.finance.yahoo.com/table.csv?s=' + ticker\n response = urllib.urlopen(link)\n html = response.read()\n return readcsv(html)",
"def ht(self, force_import: bool = False) -> hl.Table:\n if self.path is None or force_import:\n return self.import_func(**self.import_args)\n else:\n return hl.read_table(self.path)",
"def read_all(table_id = None, \n language = 'en',\n base_url = 'http://data.ssb.no/api/v0', \n full_url = None):\n \n \n if full_url is None: \n full_url = '{base_url}/{language}/table/{table_id}'.format(\n base_url = base_url,\n language = language, \n table_id = table_id)\n \n query = full_json(full_url = full_url)\n data = requests.post(full_url, json = query)\n results = pyjstat.from_json_stat(data.json(object_pairs_hook=OrderedDict))\n \n # maybe this need not be its own function, \n # but an option in read_json? json = 'all'\n \n # other functions(options include: read_recent to get only the \n # most recent values (defined as x), json = 'recent')\n \n return results[0]",
"def get_table(self, keys, value, version=None):\n meta = self.get_metadata(keys, value, version)\n\n path = make_table_path(keys, value, version)\n url = '{root}/{path}'.format(root=self._root, path=path)\n\n try:\n r = requests.get(url)\n text = r.text\n\n self._write_cache(path, text)\n\n except (requests.ConnectionError, requests.Timeout):\n text = self._read_cache(path)\n\n converters = make_converters(meta)\n return pd.read_csv(six.StringIO(text), converters=converters)",
"def _get(self):\n self.lib.get()\n blob = self.get_cached_blob()\n if not blob:\n blob = self.knex.get(self.nested_url(), url_options=self.inherited_url_options)\n self.load_blob(blob)\n self.cache_blob(blob)\n else:\n self.load_blob(blob)",
"def load_status_table():",
"def scrape_data():\n soup = get_page_source_code(\"https://www.tradingview.com/markets/stocks-usa/market-movers-active/\")\n stock_table = soup.select(\".tv-data-table__tbody tr\")\n \n stock_data = []\n\n for data in stock_table:\n stock_name = data.find(\"a\").get_text(strip=True)\n stock_price = data.select(\"td\")[1].get_text(strip=True)\n percentage_change = data.select(\"td\")[2].get_text(strip=True)\n rating = data.select(\"td\")[4].get_text(strip=True)\n stock_volume = data.select(\"td\")[5].get_text(strip=True)\n\n details = {\n\n \"name\": stock_name,\n \"price\": stock_price,\n \"change\": percentage_change,\n \"rating\": rating,\n \"volume\": stock_volume,\n \n }\n stock_data.append(details)\n return stock_data",
"def get_tables(self) -> List[Table]:\n # Assemble API calls for concurrent execution\n calls = []\n for (year, table_name), variables in self.get_variables_by_year_and_table_name().items():\n # Handle multiple for_geo values by year\n chunked_variables_by_for_geo = product(self.for_geo, chunk_variables(variables))\n for for_geo, chunk in chunked_variables_by_for_geo:\n call = self._census_api.fetch_table(\n self.estimate, year, table_name, chunk, for_geo, self.in_geo\n )\n calls.append(call)\n # Make concurrent API calls\n results = asyncio.run(self._census_api.gather_calls(calls))\n tables = list(results)\n return tables",
"def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df",
"def fetchtl():\n try:\n tlid = dbacc.reqarg(\"tlid\", \"dbid\")\n if tlid:\n tl = dbacc.cfbk(\"Timeline\", \"dsId\", str(tlid), required=True)\n else:\n slug = dbacc.reqarg(\"slug\", \"string\")\n if not slug:\n slug = \"default\"\n slug = slug.lower() # in case someone camelcased the url.\n tl = dbacc.cfbk(\"Timeline\", \"slug\", slug, required=True)\n tls = contained_timelines(tl)\n # Note the timeline was fetched for daily stats tracking\n det = {\"referer\":flask.request.headers.get(\"Referer\", \"\"),\n \"useragent\":flask.request.headers.get(\"User-Agent\", \"\"),\n \"tlid\":tl[\"dsId\"], \"tlname\":tl[\"name\"],\n \"uid\":dbacc.reqarg(\"uid\", \"dbid\")}\n dcd = {\"dsType\":\"DayCount\", \"tstamp\":dbacc.timestamp(),\n \"rtype\":\"tlfetch\", \"detail\":json.dumps(det)}\n dbacc.write_entity(dcd)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respJSON(tls)",
"def getData(self, local_cache):",
"def get_table(self, table, format=\"FITS\", verbose=False):\n # make sure the table exists\n try:\n results = self.quick(\"select top 0 * from {}\".format(table),context=\"MYDB\")\n except Exception as e:\n # raise ValueError(\"table MyDB.{} not found\".format(table)) from None\n raise_from(ValueError(\"table MyDB.{} not found\".format(table)), None)\n # first try to get it as a quick request, which is much faster if it works\n try:\n return self.quick(\"select * from {}\".format(table),context=\"MYDB\",astropy=True)\n except Exception as e:\n pass\n \n # sigh, have to go through output queue\n t0 = time.time()\n format = format.upper()\n if format not in [\"FITS\",\"CSV\"]:\n # just force a good value\n format = \"FITS\"\n if verbose:\n print(\"Making output request for {}-format data\".format(format))\n job_id = self.request_output(table,format)\n status = self.monitor(job_id)\n if status[0] != 5:\n raise Exception(\"Output request failed.\")\n job_info = self.job_info(jobid=job_id)[0]\n url = job_info[\"OutputLoc\"]\n if format == \"FITS\":\n fh = fits.open(url)\n # TDIM keywords in the Casjobs FITS header are simply wrong\n # Have to delete them to avoid bad problems in astropy.io.fits\n del fh[1].header['TDIM*']\n tab = Table(fh[1].data)\n fh.close()\n else:\n r = requests.get(url)\n r.raise_for_status()\n tab = ascii.read(MastCasJobs.replacenull(r.text),format='csv')\n if verbose:\n print(\"{:.1f} s: Retrieved {} row {} table\".format(time.time()-t0,len(tab),format))\n return tab",
"def htable_get(table, key):",
"def _retrieveCachedData(self):",
"def _get_data(data_type, table, force):\n if force or data_type not in _cache:\n _cache[data_type] =read_table(table)\n return _cache[data_type]",
"def getStockData():\n pass",
"def _csv_get(page):\n cache_key = reverse('timetable.views.display')\n\n ret = cache.get(cache_key)\n if ret is not None:\n print 'hola'\n return ret\n else:\n print 'ciao'\n ret = _csv_download(page)\n cache.set(cache_key, ret, timeout=15) # cache lasts 15 seconds\n return ret",
"def load_goods_once(self, index, tid):\n url = self.joint_url(tid)\n title_json = proxy_req(url, 1)\n if not title_json:\n if can_retry(url, index):\n self.load_goods_once(index, tid)\n return\n content = BeautifulSoup(\n title_json['content'], 'html.parser')\n # return content\n content = content.find_all('div')\n if not len(content):\n if can_retry(url, index):\n self.load_goods_once(index, tid)\n return\n # print(len(content))\n text = []\n ttid = 0\n text.append(self.find_title(index))\n good_text = []\n describe = []\n title = ''\n url = ''\n tpud = ''\n\n for word in content:\n temp_text = ''\n temp_text = word.text\n if not len(temp_text):\n continue\n if len(temp_text) and temp_text not in self.special_list and not '€' in temp_text and ((temp_text[0].isdigit() and (not '【' in temp_text or '【已下架】'in temp_text)) or (temp_text[0] == '\\xa0' and not 'http' in temp_text and not '¥' in temp_text and not '微信' in temp_text and not '(' in temp_text) or (word.span and len(word.span.text.replace('\\xa0', '')) and (word.span['style'] == 'font-size:16px;color:#fc9db1;font-weight:bold;' or word.span['style'] == 'font-size:16px;color:#1e6792;background-color:#ffffff;font-weight:bold;'))):\n temp_text = temp_text.replace('\\xa0', ' ').replace('|', '')\n temp_text = temp_text.replace(\n '//', '').replace('¥', '').strip()\n if not re.search(r'\\d\\.\\d', temp_text):\n temp_text = temp_text.replace('.', ' ')\n elif temp_text.count('.') > 1:\n temp_text = temp_text.replace('.', ' ', 1)\n temp_list = temp_text.split()\n print(temp_list)\n if not len(temp_list):\n continue\n if ttid:\n text.append(' '.join([*good_text, *[url, tpud]]))\n url = ''\n tpud = ''\n ttid += 1\n describe = []\n good_text = []\n if len(title):\n text.append(title)\n title = ''\n if temp_list[0].isdigit():\n good_text.append(str(int(temp_list[0])))\n else:\n good_text.append(str(ttid))\n good_text.append(temp_list[0])\n if len(temp_list) == 1:\n continue\n if len(good_text) == 1:\n good_text.append(temp_list[1])\n elif temp_list[1].isdigit():\n good_text.append(str(int(temp_list[1])))\n if len(temp_list) > 2:\n describe = temp_list[2:]\n if len(temp_list) > 2 and temp_list[2].isdigit():\n good_text.append(str(int(temp_list[2])))\n elif len(temp_list) > 3 and temp_list[3].isdigit():\n good_text.append(str(int(temp_list[3])))\n describe = temp_list[2]\n if len(temp_list) > 4:\n describe = [*describe, *temp_list[4:]]\n elif len(temp_list) > 3 and len(temp_list[2]) > 3 and temp_list[2][2:].isdigit():\n if len(temp_list[3]) > 3 and temp_list[3][2:].isdigit():\n good_text.append(temp_list[2] + '/' + temp_list[3])\n else:\n good_text.append(str(int(temp_list[2][2:])))\n continue\n elif len(temp_list) > 2 and re.search(r'\\d', temp_list[2]):\n digit_list = re.findall(r\"\\d+\\.?\\d*\", temp_list[2])\n good_text.append(digit_list[0])\n if len(temp_list) > 3:\n describe = [*describe, *temp_list[3:]]\n elif len(temp_list) > 2:\n describe.append(temp_list[2])\n if len(temp_list) > 3:\n describe = temp_list[3:]\n elif 'http' in temp_text:\n temp_text = temp_text.replace('\\xa0', '').strip()\n print('http', temp_text)\n url = temp_text\n elif temp_text.count('€') == 2 or temp_text.count('¥') == 2:\n temp_text = temp_text.replace('\\xa0', '').strip()\n print('¥', temp_text)\n tpud = temp_text\n elif '【店铺链接】' in temp_text:\n temp_text = temp_text.replace('\\xa0', '').strip()\n print('【店铺链接】', temp_text)\n url += temp_text\n elif temp_text in self.title_list:\n print(2, temp_text)\n temp_text = temp_text.replace('\\xa0', '')\n title = 
temp_text\n elif len(good_text) == 1:\n temp_text = temp_text.replace('\\xa0', ' ').replace(\n '.', ' ').replace('¥', '').replace('|', '')\n temp_list = temp_text.split()\n print(3, temp_list)\n if not len(temp_list):\n continue\n elif len(temp_list) > 1 and temp_list[1].isdigit():\n good_text.append(temp_list[0])\n good_text.append(str(int(temp_list[1])))\n describe = temp_list[2:]\n else:\n describe.append(temp_text)\n elif temp_text.count('¥') == 1:\n temp_text = temp_text.replace('¥', '').replace(\n '\\xa0', '').replace('|', '').strip()\n digit_list = re.findall(r\"\\d+\\.?\\d*\", temp_text)\n print('$', digit_list)\n if len(digit_list):\n good_text.append(digit_list[0])\n else:\n temp_text = temp_text.replace('\\xa0', '')\n print(4, temp_text)\n describe.append(temp_text)\n if len(good_text):\n text.append(' '.join([*good_text, *[url, tpud]]))\n\n text.append(' ')\n self.goods[index] = text\n print(len(text))",
"def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table",
"def getTableByIndex(self, index):\n pass",
"def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)"
]
| [
"0.5821737",
"0.5693989",
"0.5649378",
"0.5550121",
"0.5491645",
"0.5477678",
"0.5426335",
"0.5404699",
"0.5395954",
"0.5388061",
"0.5386293",
"0.53255683",
"0.5314863",
"0.5280537",
"0.5252164",
"0.52466863",
"0.5241401",
"0.52316666",
"0.5225064",
"0.52195275",
"0.5215217",
"0.52140605",
"0.5203037",
"0.51816314",
"0.5181494",
"0.5179921",
"0.5171343",
"0.51682866",
"0.5166676",
"0.51402265"
]
| 0.61209226 | 0 |
Replies the user with a list of places that trade for and from the item if the argument is an item name, and a list of possible trades if the argument is a location name. If the argument is empty, replies the user with the list of possible trading locations | async def trader(self, msg, arg=None, *args):
trading_table = await self.get_trading_table()
self_delete = False
if not arg:
content = '• '+'\n• '.join(place.capitalize() for place in trading_table.keys())
content = f'Places you can trade:\n{content}'
else:
if args:
arg = f'{arg} {" ".join(args)}'
# Check for place name
place_aliases = [arg.lower(), f'{arg.lower()} (fitter)', f'{arg.lower()} (trader)']
content = ''
for place in place_aliases:
if len(place) <= 2:
continue
for location_name in trading_table:
if place.lower() in location_name.lower() and len(place)*2 >= len(location_name):
# A location name
trade_list = []
for from_itm, to_list in trading_table[location_name]['from'].items():
for to_itm, from_amt, to_amt in to_list:
trade_list.append(f'• With **{from_amt} {from_itm}**, you get **{to_amt} {to_itm}**')
if content:
content += '\n\n'
content += f'Trading in {place.capitalize()}:\n'+'\n'.join(trade_list)
if not content:
# An item name or not found
item = arg
from_list = []
into_list = []
for place, trade_lists in trading_table.items():
aliases = [item.lower(), item+'s'.lower(), item[:-1].lower() if item[-1] == 's' else '', item+' metal', 'sulfuric '+item]
for alias in aliases:
if not alias:
continue
if alias in trade_lists['from']:
for to_itm, from_amt, to_amt in trade_lists['from'][alias]:
from_list.append(f'• At **{place.capitalize()}** with *{from_amt} {item}*, you get **{to_amt} {to_itm}**')
if alias in trade_lists['into']:
for from_itm, to_amt, from_amt in trade_lists['into'][alias]:
into_list.append(f'• At **{place.capitalize()}** with **{from_amt} {from_itm}**, you get *{to_amt} {item}*')
total_list = from_list + into_list
if len(total_list) == 0:
content = f'Could not find any trading option for `{item}`'
self_delete = True
else:
content = f'Places that trades from and into {item}:\n'
content += '\n'.join(total_list)
response = {
'content': content,
'reference': msg.to_reference(),
'mention_author': True,
}
if self_delete:
response['delete_after'] = 3
await msg.channel.send(**response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_item(date_in, loc_in, item_in, meal_in, requisites):\n secrets = get_secrets()\n url = secrets.get('m_dining_api_main')\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n date += str(date_in)\n url = url + location + date + meal\n url = remove_spaces(url)\n\n if meal_in == '':\n meal_entered = False\n else:\n meal_entered = True\n\n #fetching json\n data = requests.get(url).json()\n\n possible_matches = []\n\n #Loop through meals\n for i in data['menu']['meal']:\n\n #If meal specified, only check specified meal\n if meal_entered and i['name'].upper() != meal_in.upper():\n continue\n #Skip meal if no food items available\n if 'course' not in i:\n continue\n\n #Loop through food items in course\n for j in i['course']:\n for key, value in j.items():\n if key == 'name':\n course_data = j['menuitem']\n meal_name = i['name']\n #Append matches to specified item to possible_matches list\n possible_matches = find_matches(course_data, possible_matches,\n item_in, meal_name, requisites)\n \n #Specified item found\n if possible_matches:\n possible_matches = find_item_formatting(possible_matches)\n text = 'Yes, there is '\n for i in range(len(possible_matches)):\n if len(possible_matches) > 1 and (i == len(possible_matches) - 1):\n text += ' and'\n text += ' ' + possible_matches[i]\n if i != len(possible_matches) - 1:\n text += ','\n\n #Specified item not found\n else:\n text = 'Sorry, that is not available'\n\n\n return {'fulfillmentText': text}",
"def use_on(arguments, player):\n\n for i, arg in enumerate(arguments):\n if arg == \"on\":\n index_of_on = i\n break\n item_1 = \" \".join(arguments[:index_of_on]) # String of item_1 in name format\n item_2 = \" \".join(arguments[index_of_on + 1:]) # String of item_2 in name format\n\n # Gathering and finding item names\n inventory_names = [] # All possible names for items in inventory\n room_names = [] # All possible names for items in the room\n for item in player.inventory:\n inventory_names += item.name\n for item in world.tile_exists(player.location_x, player.location_y).items:\n room_names += item.name\n\n # Making sure first item is in player's inventory\n if item_1 not in [item.lower() for item in inventory_names]:\n print(\"'\" + item_1 + \"' not in your inventory.\")\n return\n\n # Gets actual item_1 object\n else:\n for i, item in enumerate(player.inventory):\n if item_1 in [name.lower() for name in item.name]:\n item_1_index = i\n break\n\n # Making sure second item is in player's inventory or in the room\n if item_2 not in [item.lower() for item in inventory_names]:\n if item_2 not in [item.lower() for item in room_names]:\n print(\"'\" + item_2 + \"' not in your inventory or anywhere nearby.\")\n return\n\n # Getting the actual item_2 object (only reaches here if it is in room or inventory)\n # WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n for i, item in enumerate(player.inventory):\n if item_2 in [name.lower() for name in item.name]:\n item_2_index = i\n item_2_location = \"inventory\"\n break\n for i, item in enumerate(world.tile_exists(player.location_x, player.location_y).items):\n if item_2 in [name.lower() for name in item.name]:\n item_2_index = i\n item_2_location = \"room\"\n break\n\n # Calling the associated \"use\" function with the correct objects\n if item_2_location == \"inventory\":\n player.inventory[item_1_index].use(player.inventory[item_2_index], player)\n elif item_2_location == \"room\":\n player.inventory[item_1_index].use(\n world.tile_exists(player.location_x, player.location_y).items[item_2_index], player)\n return",
"def pick_place(choices_arg, question='Where to next?',inv=True):\r\n \r\n choices_alt = []\r\n \r\n if isinstance(choices_arg,list):\r\n choices = list(choices_arg)\r\n if inv:\r\n choices += ['inventory','map']\r\n \r\n elif isinstance(choices_arg,tuple):\r\n choices = choices_arg[0]\r\n choices_alt = choices_arg[1]\r\n if inv:\r\n choices += ['inventory','map']\r\n choices_alt += ['inventory','map']\r\n\r\n staying = True\r\n \r\n while staying:\r\n\r\n print question + '\\n'\r\n\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #print alternate choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices_alt[index]))\r\n\r\n else:\r\n for index in range(len(choices)): #print choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices[index]))\r\n\r\n print('') #get some blank line in here yo\r\n chosen = raw_input('').lower()\r\n \r\n try:\r\n final = ''\r\n for index in range(len(choices)): #check if they typed a number\r\n item = choices[index]\r\n if index == int(chosen)-1:\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they type a number not in range\r\n question = 'Try again, foo.'\r\n except:\r\n final = ''\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #check if they typed letters\r\n item = choices_alt[index]\r\n if chosen == str(item).lower():\r\n final = choices[index]\r\n staying = False\r\n\r\n else:\r\n for index in range(len(choices)): #check if they typed letters\r\n item = choices[index]\r\n if chosen == str(item).lower():\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they misspelled\r\n question = 'Try again, foo.'\r\n\r\n if final == 'map':\r\n inspect_map()\r\n question = 'Where to?'\r\n staying = True\r\n if final == 'inventory':\r\n inspect_inventory()\r\n question = 'Where to?'\r\n staying = True\r\n\r\n return final",
"def find_item(self, utterance, rasa_output):\n item = \"\" # This is the variable that will hold the item\n split_utterance = utterance.lower().split(\" \") # Split the user's input\n split_rasa_output = \" \".join(rasa_output.lower().split(\"?\")).split(\" \") # Split Rasa's output\n\n items = [item for item in split_rasa_output if item == \"music\" \n or item == \"heating\" or item == \"tv\" or item == \"light\"] # Identify the relevant items from Rasa's output]\n # Identifies the item to be chosen from the user input\n if (\"music\" in split_utterance):\n item = \"music\"\n elif (\"tv\" in split_utterance or \"television\" in split_utterance or \"telly\" in split_utterance):\n item = \"tv\"\n elif (\"heating\" in split_utterance):\n item = \"heating\"\n elif (\"lights\" in split_utterance or \"light\" in split_utterance):\n item = \"lights\"\n elif (\"first\" in split_utterance): \n item = items[0]\n elif (\"second\" in split_utterance): \n item = items[1]\n elif (\"third\" in split_utterance or \"last\" in split_utterance): \n item = items[2]\n else:\n return None\n\n return item",
"def get_oblist_from_args(self, loc):\n oblist, moved = [], []\n if self.args == \"all\":\n oblist = [ob for ob in loc.contents if ob != self.caller]\n val = self.get_money(loc)\n if val:\n moved.append(\"%d silver\" % val)\n elif self.check_switches((\"outfit\", \"outfits\")):\n oblist = self.get_oblist_from_outfit(loc)\n elif args_are_currency(self.args):\n val = self.get_money(loc)\n if val:\n moved.append(\"%d silver\" % val)\n else:\n obj = self.caller.search(\n self.args, location=loc, use_nicks=True, quiet=True\n )\n if not obj or len(make_iter(obj)) > 1:\n AT_SEARCH_RESULT(obj, self.caller, self.args, False)\n else:\n oblist = make_iter(obj)\n return oblist, moved",
"def _do_generate_webclient_stocklist(self) -> dict:\n # NOTE: as we want dicts and not Location instances, we go directly to\n # the 'SQL level' (session.execute() and not the 'ORM level' (session.query())\n # of sqlquery.\n loclst = self.get_location_list()\n itmlst = self.get_reagent_item_list()\n itmstat = self.get_reagent_item_status_list()\n\n # create a Dict[locationid, List[reagentitem]] and a Dict[RFID, reagentitem]\n d_d: typing.Dict[typing.Optional[int], typing.List[dict]] = {}\n # rfid_reagitem_dct = ff = {}\n f_f: typing.Dict[str, dict] = {}\n for reag_item in itmlst:\n loc_id = reag_item.get('qcs_location_id', None)\n # we will keep a list of items with None locations... should not happen, but does\n # then we add these to the UNKNOWN list later on\n d_d.setdefault(loc_id, []).append(reag_item)\n # if loc_id is not None:\n # else:\n # raise RuntimeError(\"found None location {}\".format(reag_item))\n #\n rfidstr = reag_item.get('rfid', None)\n if rfidstr is not None:\n if rfidstr != 'REPLACE ME':\n f_f.setdefault(rfidstr, reag_item)\n else:\n raise RuntimeError(\"found None location {}\".format(reag_item))\n # unmangling for None...\n # find loc_id for 'UNKNOWN'...\n if None in d_d:\n none_lst = d_d[None]\n del d_d[None]\n flst = [loc for loc in loclst if loc['name'] == 'UNKNOWN']\n assert len(flst) == 1, \"cannot determine 'UNKNOWN' location\"\n unknown_lst = d_d.setdefault(flst[0]['id'], [])\n unknown_lst.extend(none_lst)\n #\n # NOW, create a Dict[locationid, Tuple[locrecord, List[reagentitem]]]\n # which we send to the client\n r_r: typing.Dict[int, typing.Tuple[dict, typing.List[dict]]] = {}\n locid_reagitem_dct = r_r\n for location in loclst:\n loc_id = location.get('id', None)\n r_r[loc_id] = (location, d_d.get(loc_id, []))\n assert len(r_r) == len(loclst), \"problem with location ids!\"\n #\n # collect the state records for each reagent item...\n z_z: typing.Dict[int, list] = {}\n for state in itmstat:\n reag_item_id = state['qcs_reag_item_id']\n # we want to replace the occurred timedate entry with a simple date\n # to present to the user, i.e.\n # 'occurred': '2011-04-20T00:00:00Z' -> '2011-04-20'\n dstr = state['occurred']\n state['occurred'] = dstr.split('T')[0]\n z_z.setdefault(reag_item_id, []).append(state)\n # and evaluate the 'final state' for each reagent item\n ritemdct = {}\n for reag_item in itmlst:\n reag_item_id = reag_item['id']\n state_lst = z_z.get(reag_item_id, None)\n if state_lst is None:\n state_info = None\n else:\n state_info = self.calc_final_state(state_lst)\n # print(\"BLAAA {} {}\".format(reag_item_id, state_info))\n # we eliminate any reagent item that has a state of 'USED_UP'.\n dct, ismissing, hasexpired = state_info\n state_info = None if dct['status'] == 'USED_UP' else state_info\n if state_info is not None:\n ritemdct[reag_item_id] = (reag_item, state_info)\n # else:\n # print(\"skipping {}\".format(reag_item))\n # create a Dict[reagentid, reagent]\n rl = self.get_reagent_list()\n rg = {}\n for reagent in rl:\n # delete the legacy location field in reagents...\n reagent.pop('location', None)\n reagent_id = reagent.get('id', None)\n if reagent_id is not None:\n rg[reagent_id] = reagent\n else:\n raise RuntimeError(\"reagent ID is None\")\n assert len(rg) == len(rl), \"problem with reagent ids!\"\n # \"itmstatlst\": itmstat,\n # finally, sort the loclst according to a hierarchy\n loclst = sortloclist(loclst)\n # , \"rfiddct\": rfid_reagitem_dct}\n return {\"loclst\": loclst, \"locdct\": locid_reagitem_dct,\n \"ritemdct\": ritemdct, 
\"reagentdct\": rg}",
"def interact_with(arguments, player):\n inputted_item = \" \".join(arguments)\n current_loc = world.tile_exists(player.location_x, player.location_y)\n\n inventory_names = []\n for item in player.inventory:\n for name in item.name:\n inventory_names.append(name.lower())\n room_names = []\n for item in current_loc.items:\n for name in item.name:\n room_names.append(name.lower())\n\n # If it's in player inventory\n if inputted_item in inventory_names:\n for i, item in enumerate(player.inventory):\n if inputted_item in [name.lower() for name in item.name]:\n player.inventory[i].interact(player)\n return\n # If it's in the room\n elif inputted_item in room_names:\n for i, item in enumerate(current_loc.items):\n if inputted_item in [name.lower() for name in item.name]:\n current_loc.items[i].interact(player)\n return\n # If it's not in inventory or room\n else: #TODO: POSSIBLE ERROR - WHAT IF THERE'S AN IDENTICALLY NAMED ITEM IN THE INVENTORY AND ROOM?\n print(\"Can't do that\")\n return",
"def markets(self, irc, msg, args):\n locationIDs = self._sql(\"\"\"\n SELECT \"locationID\" FROM evecentral_market\"\"\", None, single=False)\n if len(locationIDs) == 0:\n irc.reply('No prices have been indexed yet.', prefixNick=False)\n return\n output = []\n for locationID in locationIDs:\n locationID = locationID[0]\n location = self._get_location(locationID)\n if locationID < 30000000:\n # This would be a region\n output.append(ircutils.bold(location['itemName']))\n else:\n output.append(self._colorize_system(location))\n irc.reply(', '.join(output), prefixNick=False)",
"def check_commute(intent, session):\n user_data = database.get_user_data(session['user']['userId'])\n if not user_data:\n return reply.build(\"I don't remember any of your addresses. \"\n \"You can ask me to \\\"save an address\\\" \"\n \"if you want me to be able to check \"\n \"on your daily commute.\",\n is_end=True)\n stations = location.get_stations(config.bikes_api)\n utter = ''\n card_text = ['Checked at %s' % _time_string()]\n first_phrase = True\n for which, av_func, av_name in \\\n [('origin', _get_bikes_available, 'bikes'),\n ('destination', _get_docks_available, 'docks')]:\n if user_data.get(which):\n lat = user_data[which]['latitude']\n lon = user_data[which]['longitude']\n nearest_st = geocoding.station_from_lat_lon(\n lat, lon, stations, n_nearest=2)\n\n n_thing = av_func(nearest_st[0])\n st_name = location.text_to_speech(nearest_st[0]['name'])\n av_slice = slice(0, (-1 if n_thing == 1 else None)) # singular?\n phrase = ('%d %s at the %s station' %\n (n_thing, av_name[av_slice], st_name))\n if first_phrase:\n verb = 'is' if n_thing == 1 else 'are'\n phrase = ('There %s ' % verb) + phrase\n else:\n phrase = ', and ' + phrase\n utter += phrase\n first_phrase = False\n card_text.append(\"%s: %d %s at %s\" %\n (which.capitalize(),\n n_thing,\n av_name[av_slice],\n nearest_st[0]['name']))\n\n if n_thing < 3:\n # If there's not many bikes/docks at the best station,\n # refer users to the next nearest station.\n n_thing = av_func(nearest_st[1])\n av_slice = slice(0, (-1 if n_thing == 1 else None)) # singular?\n st_name = location.text_to_speech(nearest_st[1]['name'])\n utter += (', and %d %s at the next nearest station, %s. ' %\n (n_thing, av_name[av_slice], st_name))\n first_phrase = True # Start a new sentence next time\n card_text.append(\"Next Best %s: %d %s at %s\" %\n (which.capitalize(),\n n_thing,\n av_name[av_slice],\n nearest_st[1]['name']))\n\n return reply.build(utter,\n card_title=(\"Your %s Commute Status\" %\n config.network_name),\n card_text='\\n'.join(card_text),\n is_end=True)",
"def get_items(data, requisites, formatted):\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata",
"def request_location_and_meal(date_in, loc_in, meal_in, requisites):\n\n #preset vars\n url = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n meal += meal_in\n date += str(date_in)\n url = url + location + date + meal\n url = remove_spaces(url)\n\n #fetching json\n data = requests.get(url).json()\n\n #checking if specified meal available\n if check_meal_available(data, meal_in):\n returnstring = (get_items(data, requisites, False)).rstrip(', ')\n return format_plural(returnstring)\n else:\n return \"No meal is available\"",
"def go_near(furniture_name, robot_teleport):\n if furniture_name == \"livingroom_coffeetable\":\n print(\"Request to put robot at livingroom_coffeetable.\")\n x_y_z_yaw_pitch_roll = {\"x\": 4.5, \"y\": 7.3, \"z\": 0, \"yaw\": 3.8, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_chest\":\n print(\"Request to put robot at bedroom_chest.\")\n x_y_z_yaw_pitch_roll = {\"x\": 5, \"y\": 11.3, \"z\": 0, \"yaw\": 0.0, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_console\":\n print(\"Request to put robot at bedroom_console.\")\n x_y_z_yaw_pitch_roll = {\"x\": 4.2, \"y\": 12.2, \"z\": 0, \"yaw\": math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_bedsidetable\":\n print(\"Request to put robot at bedroom_bedsidetable.\")\n x_y_z_yaw_pitch_roll = {\"x\": 3.1, \"y\": 12.1, \"z\": 0, \"yaw\": math.pi, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"bedroom_shelf\":\n print(\"Request to put robot at bedroom_shelf.\")\n x_y_z_yaw_pitch_roll = {\"x\": 2.4, \"y\": 9.8, \"z\": 0, \"yaw\": 3*math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"kitchen_cupboard\":\n print(\"Request to put robot at kitchen_cupboard.\")\n x_y_z_yaw_pitch_roll = {\"x\": 6.7, \"y\": 10.6, \"z\": 0, \"yaw\": math.pi, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"kitchen_table\":\n print(\"Request to put robot at kitchen_table\")\n x_y_z_yaw_pitch_roll = {\"x\": 7.8, \"y\": 10.2, \"z\": 0, \"yaw\": math.pi/8, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n elif furniture_name == \"livingroom_table\":\n print(\"Request to put robot at livingroom_table\")\n x_y_z_yaw_pitch_roll = {\"x\": 7.4, \"y\": 7.6, \"z\": 0, \"yaw\": 3*math.pi/2, \"pitch\": 0, \"roll\": 0}\n robot_teleport.publish(x_y_z_yaw_pitch_roll)\n else:\n print(\"Unknown furniture: \" + furniture_name)",
"def _process_main_list(self, data):\n try:\n source = QXmlInputSource()\n source.setData(data)\n self._reader.parse(source)\n self._has_list = True\n self._places = self._osm_hand.get_places()\n except Exception:\n self.send_error(\"Invalid Response.\")\n return\n\n if len(self._places) == 0:\n self.send_error(\"No place found.\")\n return\n\n for place in self._places:\n try:\n url = \"http://nominatim.openstreetmap.org\" \\\n \"/reverse?format=xml&osm_type=N&osm_id=%s\" % place.get_osm_id()\n self._net_if.download(url)\n print \"%s, %s\" % (place.get_name(), place.get_amenity())\n except Exception:\n continue",
"def search_items(self):\n urls = []\n prices = []\n names = []\n for item in self.items:\n print(f\"Searching for {item}...\")\n\n self.driver.get(self.amazon_url)\n #select = Select(self.driver.find_element_by_id(\"searchDropdownDescription\"))\n #select.select_by_visible_text('All Departments')\n\n search_input = self.driver.find_element_by_id(\"twotabsearchtextbox\")\n search_input.send_keys(item)\n\n time.sleep(2)\n #wait = WebDriverWait(self.driver, self.explicit_wait)\n #wait.until(EC.presence_of_all_elements_located((By.ID, \"twotabsearchtextbox\")))\n\n search_button = self.driver.find_element_by_xpath('//*[@id=\"nav-search\"]/form/div[2]/div/input')\n search_button.click()\n\n time.sleep(2)\n\n t = self.driver.find_element_by_id(\"result_0\")\n asin = t.get_attribute(\"data-asin\")\n url = \"https://www.amazon.ca/dp/\" + asin\n price = self.get_product_price(url)\n name = self.get_product_name(url)\n \n prices.append(price)\n urls.append(url)\n names.append(name)\n\n print(name)\n print(price)\n print(url)\n\n time.sleep(2)\n\n return prices, urls, names",
"def search_for_contracts(self, market=None, buy_sell=None, type_=None, contracts=\"All\"):\n if not contracts:\n contracts = []\n if not type_:\n pass\n elif type_.lower() in ['yes', 'long'] and buy_sell == 'buy':\n type_ = {'long': 'BestBuyYesCost'}\n elif type_.lower() in ['no', 'short'] and buy_sell == 'buy':\n type_ = {'short': 'BestBuyNoCost'}\n elif type_.lower() in ['yes', 'long'] and buy_sell == 'sell':\n type_ = {'long': 'BestSellYesCost'}\n elif type_.lower() in ['no', 'short'] and buy_sell == 'sell':\n type_ = {'short': 'BestSellNoCost'}\n \n if not market:\n market_links = [(\"us_election\", 'https://www.predictit.org/api/marketdata/category/6'), (\"us_politics\", 'https://www.predictit.org/api/marketdata/category/13'), (\"world_politics\", 'https://www.predictit.org/api/marketdata/category/4')]\n elif 'us' and 'election' in market.replace('.', '').lower():\n market_links = [(\"us_elections\", 'https://www.predictit.org/api/marketdata/category/6')]\n elif 'us' and 'politic' in market.replace('.', '').lower():\n market_links = [(\"us_politics\",'https://www.predictit.org/api/marketdata/category/13')]\n elif 'world' in market.lower():\n market_links = [(\"world_politics\", 'https://www.predictit.org/api/marketdata/category/4')]\n \n \n\n market_data=[]\n for category, market_link in market_links:\n markets = list(self.browser.get(market_link).json()['Markets'])\n for market in markets:\n market = market\n market[\"Category\"] = category\n market[\"References\"]=[]\n wikidict={\"Trump\": \"http://dbpedia.org/resource/Donald_Trump\", \"Clinton\": \"http://dbpedia.org/resource/Hillary_Clinton\", \"Ossoff\": \"https://en.wikipedia.org/wiki/Jon_Ossoff\", \"Virginia\": \"https://en.wikipedia.org/wiki/Virginia\", \"Georgia\": \"https://en.wikipedia.org/wiki/Georgia_(U.S._state)\",\"Election\":\"https://en.wikipedia.org/wiki/Elections_in_the_United_States\"}\n \n for thing in [\"Trump\", \"Clinton\", \"Ossoff\", \"Virginia\", \"Georgia\",\"Election\"]:\n if thing.lower() in [element.lower() for element in market[\"Name\"].split()]:\n market[\"References\"].append(wikidict[thing])\n market_data.append(json.dumps(market))\n \n return market_data",
"def show_places():\n t0 = time.time()\n print(f\"--- {request}\")\n print(f\"--- {user_session}\")\n # Set context by owner and the data selections\n u_context = UserContext(user_session, current_user, request)\n # Which range of data is shown\n u_context.set_scope_from_request(request, \"place_scope\")\n u_context.count = request.args.get(\"c\", 50, type=int)\n\n with PlaceReader(\"read\", u_context) as service:\n # reader = PlaceReader(readservice, u_context)\n # The 'items' list has Place objects, which include also the lists of\n # nearest upper and lower Places as place[i].upper[] and place[i].lower[]\n res = service.get_place_list()\n\n if res[\"status\"] == Status.NOT_FOUND:\n print(f'bp.scene.routes.show_places: {_(\"No places found\")}')\n elif res[\"status\"] != Status.OK:\n print(\n f'bp.scene.routes.show_places: {_(\"Could not get places\")}: {res.get(\"statustext\")}'\n )\n\n elapsed = time.time() - t0\n stk_logger(\n u_context,\n f\"-> bp.scene.routes.show_places n={len(res.get('items'))} e={elapsed:.3f}\",\n )\n return render_template(\n \"/scene/places.html\",\n places=res[\"items\"],\n menuno=4,\n user_context=u_context,\n elapsed=elapsed,\n )",
"def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)",
"def search_helper(name_list, operation, twitter_dict): \r\n return_list = []\r\n \r\n for name in name_list:\r\n if operation == 'following':\r\n search_specified_list = twitter_dict[name]['following']\r\n for following_names in search_specified_list: \r\n if following_names not in return_list: \r\n return_list.append(following_names) \r\n \r\n elif operation == 'followers':\r\n followers = all_followers(twitter_dict, name)\r\n for followers_name in followers: \r\n if followers_name not in return_list: \r\n return_list.append(followers_name) \r\n \r\n return return_list",
"def suppliers(ticker: str, other_args: List[str]):\n parser = argparse.ArgumentParser(\n prog=\"supplier\",\n add_help=False,\n description=\"List of suppliers from ticker provided. [Source: CSIMarket]\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n url_supply_chain = f\"https://csimarket.com/stocks/competitionNO3.php?supply&code={ticker.upper()}\"\n text_supplier_chain = BeautifulSoup(requests.get(url_supply_chain).text, \"lxml\")\n\n l_supplier = list()\n for supplier in text_supplier_chain.findAll(\n \"td\", {\"class\": \"plavat svjetlirub dae al\"}\n ):\n l_supplier.append(supplier.text)\n\n if l_supplier:\n print(\"List of Suppliers: \" + \", \".join(l_supplier) + \"\\n\")\n else:\n print(\"No suppliers found.\\n\")\n\n except Exception as e:\n print(e, \"\\n\")",
"def run_checklist(items):\n\tuser_responses = OrderedDict()\n\n\ttotal_items = 0\n\tfor item in items:\n\t\ttotal_items = total_items + 1\n\t\n\t#Ask questions\n\tcurrent_item_number = 1\n\tfor item in items:\n\t\tprint (\"%i of %i: \" % (current_item_number, total_items) + item)\n\t\tanswer = raw_input(\"> \")\n\t\tuser_responses[item] = answer\n\t\tcurrent_item_number = current_item_number + 1\n\t#Todo: Plain text Antyhing elsE?\n\tprint (\"\\nChecklist complete.\")\n\treturn user_responses",
"def main(query):\n detail_response = search_for_trips(query.departure_date, query.start_point, query.end_point)\n if detail_response:\n all_journeys = blablacar_journey(detail_response, query.departure_date, query.start_point, query.end_point)\n else:\n all_journeys = list()\n return all_journeys",
"def get_take(self, item):\n item = ' '.join(item)\n if item == 'edelweiss':\n if self.finished_places == 3:\n self.finished_places += 1\n return self\n return super(East, self).get_take(item)",
"async def trade(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if len(args) < 4:\n await ctx.send('Arguments missing. '\n 'Syntax is `~trade [name] [number] [item] [offer]`.')\n return\n\n try:\n trade = {'user1': ctx.author.id,\n 'user2': args[0],\n 'amount1': args[1],\n 'amount2': args[-1],\n 'item1': ' '.join(args[2:-1]),\n 'item2': 'coins'}\n ctx.bot.trade_manager.add_trade(ctx, trade)\n except TradeError as e:\n await ctx.send(e.msg)\n return\n\n name = args[0]\n for member in ctx.guild.members:\n if name.lower() in member.name.lower():\n name_member = member\n break\n\n offer = users.parse_int(args[-1])\n number = users.parse_int(args[1])\n itemid = items.find_by_name(' '.join(args[2:-1]))\n name = get_display_name(ctx.author)\n offer_formatted = '{:,}'.format(offer)\n out = (f'{items.SHOP_HEADER}{name.title()} wants to sell {name_member.mention} '\n f'{items.add_plural(number, itemid)} for {offer_formatted} coins. '\n f'To accept this offer, reply to this post with a :thumbsup:. '\n f'Otherwise, this offer will expire in one minute.')\n msg = await ctx.send(out)\n\n if await self.confirm(ctx, msg, out, timeout=60):\n price = {\"0\": offer}\n users.update_inventory(name_member.id, price, remove=True)\n users.update_inventory(ctx.author.id, price)\n loot = {itemid: number}\n users.update_inventory(ctx.author.id, loot, remove=True)\n users.update_inventory(name_member.id, loot)\n\n buyer_name = get_display_name(name_member)\n await ctx.send(f'{items.SHOP_HEADER}{name.title()} successfully sold '\n f'{items.add_plural(number, itemid)} to {buyer_name} for '\n f'{offer_formatted} coins!')\n ctx.bot.trade_manager.reset_trade(trade, ctx.author.id, name_member.id)",
"def get_places(location, keyword):\n if location is None or keyword is None:\n return None\n\n api_key = ''\n\n search_term = '%s %s' % (location, keyword)\n places_url = 'https://maps.googleapis.com/maps/api/place/textsearch/json?' \\\n 'query=%s&key=%s' % (search_term, api_key)\n\n places_response = requests.get(places_url)\n if not places_response.ok:\n return None\n\n else:\n data = json.loads(places_response.text)\n if data['status'] != 'OK':\n return None\n\n else:\n # Store all place information as a list of dictionaries.\n places_list = []\n for place in data['results']:\n address = place['formatted_address']\n name = place['name']\n try:\n open_bool = place['opening_hours']['open_now']\n except KeyError:\n open_bool = 'n/a'\n try:\n rating = place['rating']\n rating_total = place['user_ratings_total']\n except KeyError:\n rating = 'n/a'\n rating_total = 'n/a'\n\n p_dict = {'address': address, 'name': name, 'open': open_bool,\n 'rating': rating, 'total': rating_total}\n\n places_list.append(p_dict)\n\n return places_list",
"def get_take(self, item):\n item = ' '.join(item)\n if str(item) == 'all':\n if self.finished_places == 0:\n self.items = ['edelweiss']\n self.items.append('prism')\n self.items.append('pickle')\n self.finished_places += 1\n elif(self.items):\n self.items.append(item)\n else:\n self.items = [item]\n return self",
"def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')",
"def search(usr, item, area = \"shop\", scope = \"exact\", min = \"0\", max = \"99999\"):\n if not usr:\n raise invalidUser\n\n if not item:\n raise invalidSearch\n\n if area != ShopWizard.SHOP and area != ShopWizard.GALLERY:\n logging.getLogger(\"neolib.shop\").info(\"Invalid area supplied for shop wizard search: \" + area)\n raise invalidSearch\n\n if scope != ShopWizard.CONTAINING and scope != ShopWizard.EXACT:\n logging.getLogger(\"neolib.shop\").info(\"Invalid scope supplied for shop wizard search: \" + area)\n raise invalidSearch\n\n if int(min) < 0:\n logging.getLogger(\"neolib.shop\").info(\"Invalid min value supplied for shop wizard search: \" + min)\n raise invalidSearch\n\n if int(max) > 99999:\n logging.getLogger(\"neolib.shop\").info(\"Invalid max value supplied for shop wizard search: \" + max)\n raise invalidSearch\n\n if isinstance(item, Item):\n item = item.name\n\n pg = usr.getPage(\"http://www.neopets.com/market.phtml?type=wizard\")\n\n form = pg.form(action=\"market.phtml\")\n form.update({'shopwizard': item, 'table': area, 'criteria': scope, 'min_price': str(min), 'max_price': str(max)})\n pg = form.submit()\n\n # Indicates shop wizard banned\n if \"too many searches\" in pg.content:\n time = pg.find(\"b\", text = \"Whoa there, too many searches!\").parent.p.b.item\n e = shopWizBanned()\n e.time = time\n raise e\n\n # Indicates a faerie quest\n if \"You're working for a faerie\" in pg.content:\n logging.getLogger(\"neolib.shop\").info(\"Could not search for \" + item + \". A Faerie quest is active\")\n raise activeQuest\n\n if \"did not find\" in pg.content:\n if item in pg.content:\n return False # Indicates UB item\n elif \"...</span>\" in pg.content:\n # Probably invalid item\n raise invalidSearch\n\n return ShopWizardResult(pg, usr)",
"def find_items():\n check50.exists(\"item.py\")\n try:\n check50.run(run_command).stdin(\"in\").stdout(room_3_items)\n except check50.Failure as error:\n raise check50.Failure(\"Could not find items upon first entering room.\\n\" +\n \" Remember to seperate multiple items by a single newline\\n\" +\n f\" {error}\")\n # Check for look command\n try:\n check = check50.run(run_command)\n moves = [\"IN\", \"OUT\", \"IN\", \"LOOK\"]\n\n for move in moves:\n check.stdout(\"> \")\n check.stdin(move, prompt=False)\n\n check.stdout(\"KEYS: a set of keys\")\n except check50.Failure as error:\n raise check50.Failure(f\"Could not find items when using LOOK.\\n {error}\")",
"def place_buy_order(self):\n price = request.form[\"price\"]\n stocks = request.form[\"stocks\"]\n trader_id = request.form[\"trader_id\"]\n self.market.place_buy_order(trader_id, price, stocks)\n return \"\"",
"def lookup(self,user_preferences):\n res = list()\n \n fit_area = set()\n fit_price = set()\n fit_food = set()\n \n if user_preferences[0] == \"any\" or user_preferences[0] == 0:\n fit_area = set(range(len(self.area)))\n else:\n for i,a in enumerate(self.area):\n if a == user_preferences[0]:\n fit_area.add(i)\n if user_preferences[1] == \"any\" or user_preferences[1] == 0:\n fit_price = set(range(len(self.price_range)))\n else:\n for j,p in enumerate(self.price_range):\n if p == user_preferences[1]:\n fit_price.add(j)\n if user_preferences[2] == \"any\" or user_preferences[2] == 0:\n fit_food = set(range(len(self.food_types)))\n else:\n for k,f in enumerate(self.food_types):\n if f == user_preferences[2]:\n fit_food.add(k)\n option_numbers = fit_area.intersection(fit_price, fit_food)\n if option_numbers:\n for i in option_numbers:\n res.append(self.restaurant_names[i])\n \n return res"
]
| [
"0.55701673",
"0.54838645",
"0.54508644",
"0.53599703",
"0.5310921",
"0.5309665",
"0.5201589",
"0.5175528",
"0.50172406",
"0.49962473",
"0.4967968",
"0.49516737",
"0.49477062",
"0.49312726",
"0.49152538",
"0.49084076",
"0.49028268",
"0.489911",
"0.4895174",
"0.4892474",
"0.4883487",
"0.4873139",
"0.48707145",
"0.48698643",
"0.48318177",
"0.47966462",
"0.47954416",
"0.47862214",
"0.47801596",
"0.4775187"
]
| 0.61802715 | 0 |
Replies the user with a snapshot of the specified location | async def snapshot(self, msg, *args):
if not Guard.has_permission(msg, 'attach_files'):
await msg.channel.send(**{
'content': 'Cannot send images on this channel',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
if not args:
return
args = list(args)
if args[0] == 'world':
include_world = True
args.pop(0)
else:
include_world = False
if args and args[0] == 'marker':
show_marker = True
args.pop(0)
else:
show_marker = False
try:
if len(args) == 2:
lat, lng = map(float, args)
zoom = 0
elif len(args) == 3:
lat, lng, zoom = map(float, args)
else:
return
except:
return
if show_marker:
map_controller = MapController(lat, lng, zoom, mlat=lat, mlng=lng)
else:
map_controller = MapController(lat, lng, zoom)
if not map_controller.is_valid():
await msg.channel.send(**{
'content': f'Invalid location {lat} {lng} {zoom}',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
image = await map_controller.generate_snapshot(include_world=include_world)
snapshot_id = map_controller.get_id().replace('_', ', ').replace('m', '')
location_str = f'center at -{snapshot_id}'
content = f'Here is a snapshot of that location ({location_str}).'
await msg.channel.send(**{
'content': content,
'file': discord.File(image, filename=f'snapshot_{map_controller.get_id()}.png'),
'reference': msg.to_reference(),
'mention_author': True,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveLocally(self, location):\n t = threading.Thread(target=Snapshot.__saveSnapshot, args=(self, location))\n t.start()",
"def snap(self, path=None):\n if path is None:\n path = \"/tmp\"\n else:\n path = path.rstrip(\"/\")\n day_dir = datetime.datetime.now().strftime(\"%d%m%Y\")\n hour_dir = datetime.datetime.now().strftime(\"%H%M\")\n ensure_snapshot_dir(path+\"/\"+self.cam_id+\"/\"+day_dir+\"/\"+hour_dir)\n f_path = \"{0}/{1}/{2}/{3}/{4}.jpg\".format(\n path,\n self.cam_id,\n day_dir,\n hour_dir,\n datetime.datetime.now().strftime(\"%S\"),\n )\n\n urllib.urlretrieve(\n 'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(\n self.address, \n self.user, \n self.pswd,\n ),\n f_path,\n )\n #print resp[1]['Content-disposition'].replace(\"filename=\\\"\",\"\")[:-1]",
"def take_snapshot():\n df = scrape()\n for i in df.index:\n single = df.loc[i]\n # create or get locations\n loc, created = Location.objects.get_or_create(\n name=single['Location'],\n all_stands=single['Stands'],\n coordinates=single['Coords']\n )\n # add a new snapshot\n obj = Snapshot(\n location=loc,\n avail_bikes=single['Bikes'],\n free_stands=single['Free stands'],\n timestamp=datetime.now(tz=timezone('Europe/Warsaw'))\n )\n obj.save()",
"def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot",
"def snapshot(self):\n self._client.snapshot()",
"def snapshot(self):\n pass",
"def get_snapshot(project, zone, instance):\n snapshot_disks(project, zone, *get_disks(instance))",
"def snapshot_image_on_provider(self, builder, provider, credentials, target, template, parameters):",
"def photo(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n photo_file = update.message.photo[-1].get_file()\n x = \".jpg\"\n z = user.first_name + x\n photo_file.download(z)\n logger.info(\"Photo of %s: %s\", user.first_name, 'user_photo.jpg')\n update.message.reply_text(\n 'Che bella foto! ora, mandami la tua posizione se puoi, o scrivi /skip se non vuoi farlo.'\n )\n\n return LOCATION",
"def create_snapshot(self, **kwargs):\n post_body = json.dumps({'snapshot': kwargs})\n resp, body = self.post('snapshots', post_body)\n body = json.loads(body)\n self.validate_response(schema.create_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)",
"def snapshot(self, filename=None):\n if filename:\n self.command(\"snapshot %(filename)s\" % locals())\n else:\n self.command(\"snapshot\")",
"def screen_grab(save, location):\n if location is not None and not isinstance(location, str):\n raise TypeError(\"Please provide a string for the location argument.\")\n\n img = ImageGrab.grab(bbox=(20, 260, 520, 700))\n\n if save:\n img.save(location)\n\n return np.array(img)",
"def pull_streetview(location,\n size='640x480',\n fov='90',\n pitch='0',\n radius='50',\n key=keys.google,\n heading=None):\n try:\n filename = location.replace(' ', '_')\n except:\n filename = round(time.time(), 0)\n params = [{\n 'size': size,\n 'location': location,\n 'fov': fov,\n 'pitch': pitch,\n 'radius': radius,\n 'key': key\n }]\n if heading != None:\n params[0]['heading'] = heading\n\n results = google_streetview.api.results(params)\n results.download_links('./app/img')",
"def snapshot(snapshot_type, result_q, time_delta):",
"def get_snapshot_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n snapshot: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSnapshotResult]:\n ...",
"def show_snapshot(self, snapshot_id):\n url = \"snapshots/%s\" % snapshot_id\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_snapshot, resp, body)\n return rest_client.ResponseBody(resp, body)",
"def recorder_snapshot(recorder_name, width=None, height=None, selected_name=None):\n recorder = get_recorder_by_name(recorder_name)\n # get snapshot name for the recorder\n if selected_name is None:\n filename = cnfg.recorders[recorder_name].filename_snapshot()\n else:\n snapshot_path = os.path.dirname(os.path.abspath(cnfg.recorders[recorder_name].filename_snapshot()))\n filename = os.path.join(snapshot_path, selected_name+'.jpg')\n if os.path.isfile(filename):\n height = int(height)\n width = int(width)\n img = cv2.imread(filename)\n # resize image and preserve aspect ratio\n orig_height, orig_width, _ = img.shape\n if orig_height > height:\n scale_height = height / orig_height\n else:\n scale_height = 1\n if orig_width > width:\n scale_width = width / orig_width\n else:\n scale_width = 1\n scale = min(scale_height, scale_width) \n if scale < 1:\n height = math.floor(orig_height*scale)\n width = math.floor(orig_width*scale)\n img = cv2.resize(img, (width, height)) \n # make darker if snapshot was not updated\n if not recorder.status in ['started'] and not recorder.watcher:\n a = np.double(img)\n b = a * 0.2\n img = np.uint8(b)\n # encode to jpeg image\n _, img_jpg = cv2.imencode('.jpg', img)\n response = make_response(img_jpg.tostring())\n response.headers.set('Content-Type', 'image/jpeg')\n return response\n else:\n return send_file('templates/static/nosnapshot.gif')",
"def do_takesnapshot(self, str_arg):\n img = None\n fname = validateString(str_arg)\n try:\n # self.adbc.wake()\n printLog(self.threadName + 'taking snapshot (0,50,%d,%d) ...' %\n (self.scn_width, self.scn_height))\n img = self.adbc.takeSnapshot(reconnect=True)\n # PIL code\n img = img.crop((0, 50, self.scn_width, self.scn_height))\n img.save(fname, SNAPSHOT_IMAGE_FORMAT)\n # if self.scn_width>SNAPSHOT_WIDTH:\n # self.compressImage(fname)\n # os.remove(fname)\n # im.save(fname)\n printLog(self.threadName + 'snapshot saved as %s' % fname)\n except EnvironmentError:\n self.resultFlag = False\n if DEBUG:\n traceback.print_exc()\n finally:\n img = None",
"def snapshot(self):\n return self._context.get(\"snapshot\", None)",
"def create_snapshot(self, snap_description=None):\n raise NotImplementedError()",
"def link_snapshot(argstr):\n pass",
"def location(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n user_location = update.message.location\n logger.info(\n \"Location of %s: %f / %f\", user.first_name, user_location.latitude, user_location.longitude\n )\n update.message.reply_text(\n 'Scommetto che è un posto da visitare! Per ultima cosa , dimmi qualcosa di te stessa/o.'\n )\n\n return BIO",
"def do_takesnapshotx(self, str_arg):\n # img = None\n fname = \"\"\n args = validateString(str_arg)\n # print args\n try:\n pa = re.compile('^(\\(\\d*,\\d*\\))\\D*(\\(\\d*,\\d*\\))(.+)$')\n matches = pa.search(args.strip()).groups()\n # print matches\n point1 = self.__getPointXY(matches[0])\n point2 = self.__getPointXY(matches[1])\n fname = matches[2].strip()\n except AttributeError, e:\n printLog(self.threadName + \"AttributeError: %s\" % e.message, logging.ERROR)\n raise ValueError('do_takesnapshotx: Bad parameter.')\n try:\n # self.adbc.wake()\n img = self.adbc.takeSnapshot(reconnect=True)\n printLog(self.threadName + \"getting sub image: x0=%d, y0=%d, x1=%d, y1=%d\" %\n (int(point1[0]), int(point1[1]), int(point2[0]), int(point2[1])))\n # PIL code\n img = img.crop((int(point1[0]), int(point1[1]),\n int(point2[0]), int(point2[1])))\n img.save(fname, SNAPSHOT_IMAGE_FORMAT)\n del img\n except Exception, e:\n self.resultFlag = False\n printLog(self.threadName + \"do_takesnapshotx: Exception: %s\" % e.message, logging.ERROR)",
"def location():\n return _locate_or_create()",
"def get_location(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/location\"\n })",
"def snapshot(self, context, instance, image_id, update_task_state):\n raise NotImplementedError()",
"def custom_storage(self):\n return SnapshotView(self, self.custom_data)",
"def saveSnapshot(self, filename): \n\t\tpass",
"def get_snapshot_object(session, key, snapshot=None):\n # type: (Session, Text, Optional[Text]) -> Any\n url_tail = \"/{}/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_SNAPSHOTS,\n session.get_snapshot(snapshot),\n CoordConstsV2.RSC_OBJECTS,\n )\n return _get_stream(session, url_tail, {CoordConstsV2.QP_KEY: key})",
"def post_get_snapshot(self, response: pubsub.Snapshot) -> pubsub.Snapshot:\n return response"
]
| [
"0.6316481",
"0.6148619",
"0.59814966",
"0.5792674",
"0.57118636",
"0.55614907",
"0.5513457",
"0.54459316",
"0.53872216",
"0.53725713",
"0.52661574",
"0.5254602",
"0.52459157",
"0.52299005",
"0.5224609",
"0.52218723",
"0.521777",
"0.5199946",
"0.51784426",
"0.51539075",
"0.5152084",
"0.5141182",
"0.5120341",
"0.5120095",
"0.5108169",
"0.5107431",
"0.50969005",
"0.5096373",
"0.5069827",
"0.50677305"
]
| 0.6323737 | 0 |
Replies the user with the coordinates of the given place, as well as the snapshot and the URL | async def location(self, msg, place_name=None, *args):
if not place_name:
return
if args:
place_name = f'{place_name} {" ".join(args)}'
if place_name.lower() in MapController.locations:
lat, lng, size = MapController.locations[place_name.lower()]
map_controller = MapController(lat, lng, 1, lat, lng)
content = f'The location `{place_name}` is located at ({lat:.2f}, {lng:.2f})'
if Guard.has_permission(msg, 'embed_links'):
# If can embed link, post the URL too
url = map_controller.generate_url()
content = f'{content}\nURL: <{url}>'
response = {
'content': content,
'reference': msg.to_reference(),
'mention_author': True,
}
if Guard.has_permission(msg, 'attach_files'):
# If can post image, post the snapshot too
image = await map_controller.generate_snapshot(include_world=True)
response['file'] = discord.File(image, filename=f'snapshot_{map_controller.get_id()}.png')
await msg.channel.send(**response)
else:
await msg.channel.send(**{
'content': f'There is no location named `{place_name}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_place(self):\n place = self.status.user['location']\n return place",
"def geolocate(place): # string\n geolocator = geopy.geocoders.Nominatim()\n location = geolocator.geocode(place)\n # i dati si danno in (latitudine, longitudine), ma vanno intesi come (y, x)\n # ovvero vanno visualizzati come x=longitudine, y=latitudine\n return (location.latitude, location.longitude) # coordinate",
"def location(update: Update, context: CallbackContext) -> int:\n user = update.message.from_user\n user_location = update.message.location\n logger.info(\n \"Location of %s: %f / %f\", user.first_name, user_location.latitude, user_location.longitude\n )\n update.message.reply_text(\n 'Scommetto che è un posto da visitare! Per ultima cosa , dimmi qualcosa di te stessa/o.'\n )\n\n return BIO",
"def locate(self):\n if self.location == '':\n return None\n if self.coords is not None:\n return self.coords\n\n loc = urlencode({'address': self.location})\n urldoc = urlopen(User._GMAP_URL.format(query=loc))\n jsObj = json.loads(urldoc.readall().decode('utf-8'))\n if len(jsObj['results']) > 0:\n # discard commercial results\n locTypes = jsObj['results'][0]['address_components'][0]['types']\n if not 'premise' in locTypes and not 'route' in locTypes and not 'establishment' in locTypes and not 'subpremise' in locTypes:\n self.coords = jsObj['results'][0]['geometry']['location']\n return self.coords\n # still here? it's all rubbish\n return None",
"def location(bot, update):\n\n bot.send_message(chat_id=update.message.chat_id, text=\"OK you wait ah...\")\n latitude = update.message.location.latitude\n longitude = update.message.location.longitude\n bot.send_message(chat_id=update.message.chat_id, text=\"Just let you know for fun lol - your latitude is {0}, and your longitude is {1}\".format(latitude,longitude))\n try:\n # Read carpark csv as dataframe\n df = pd.read_csv('Parking_withcoords.csv')\n \n # Calculate distance between each carpark and postal code and append it to dataframe\n distance = []\n for coord in df['Coord_rad']: \n carpark = haversine((radians(latitude),radians(longitude)), ast.literal_eval(coord)) #converts string to tuple\n distance.append(carpark)\n df['Distance_km'] = distance\n\n # Sort in ascending order and extract top 5\n top_five = df.sort_values('Distance_km').head(5)\n\n for row in top_five['Info']:\n bot.send_message(chat_id=update.message.chat_id, parse_mode='HTML', text=row.replace(\"\\$\", \"$\"))\n\n bot.send_message(chat_id=update.message.chat_id, text=\"Fast hor! If you want to check other places, type /start again ok :P\")\n except:\n bot.send_message(chat_id=update.message.chat_id, text=\"Jialat liao got error...try again with /start and then use the postal code method can? Paiseh!\")",
"def get_coordinates(self, soup: BeautifulSoup) -> None:\n try:\n url = soup.find(\n \"a\", {\"title\": \"Open this area in Google Maps (opens a new window)\"}\n )[\"href\"]\n coordinates = url[url.find(\"=\") + 1 : url.find(\"&\")]\n coordinates = [float(n) for n in coordinates.split(\",\")]\n except (AttributeError, TypeError):\n coordinates = [None, None]\n self.__collected_dic[\"latitude\"].append(coordinates[0])\n self.__collected_dic[\"longitude\"].append(coordinates[1])",
"def update():\n\n # Ensure parameters are present\n if not request.args.get(\"sw\"):\n raise RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n # Ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # Explode southwest corner into two variables\n sw_lat, sw_lng = map(float, request.args.get(\"sw\").split(\",\"))\n\n # Explode northeast corner into two variables\n ne_lat, ne_lng = map(float, request.args.get(\"ne\").split(\",\"))\n\n # Find 10 cities within view, pseudorandomly chosen if more within view\n if sw_lng <= ne_lng:\n\n # Doesn't cross the antimeridian\n rows = db.execute(\"\"\"SELECT * FROM places\n WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude AND longitude <= :ne_lng)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY RANDOM()\n LIMIT 10\"\"\",\n sw_lat=sw_lat, ne_lat=ne_lat, sw_lng=sw_lng, ne_lng=ne_lng)\n\n else:\n\n # Crosses the antimeridian\n rows = db.execute(\"\"\"SELECT * FROM places\n WHERE :sw_lat <= latitude AND latitude <= :ne_lat AND (:sw_lng <= longitude OR longitude <= :ne_lng)\n GROUP BY country_code, place_name, admin_code1\n ORDER BY RANDOM()\n LIMIT 10\"\"\",\n sw_lat=sw_lat, ne_lat=ne_lat, sw_lng=sw_lng, ne_lng=ne_lng)\n\n # Output places as JSON\n return jsonify(rows)",
"def get_place_detail(\r\n self, place_id: str, fields: list[str] = []\r\n ) -> dict[str, Any]:\r\n default_field: list = [\r\n \"formatted_address\",\r\n \"geometry\",\r\n \"name\",\r\n \"photo\",\r\n \"type\",\r\n \"url\",\r\n \"vicinity\",\r\n \"rating\",\r\n ]\r\n # Remove duplicate field from both input fields and default fields.\r\n fields = list(set([*fields, *default_field]))\r\n place: dict = self.gmaps.place(\r\n place_id, fields=fields, language=\"ja\"\r\n )\r\n if place[\"status\"] != \"OK\":\r\n return {}\r\n\r\n result: dict[str, Any] = place[\"result\"]\r\n\r\n return result",
"def get_lat_lng(self):\n self.input_api = '%20'.join(self.parsed_question)\n self.input_api = ' '.join(self.parsed_question)\n self.google_api_url = 'https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input={}&inputtype=textquery&fields=geometry,name,place_id&types=point_of_interest&key={}'.format (self.input_api, api_key) \n self.r = requests.get(url=self.google_api_url)\n self.data = self.r.json()\n self.name = self.data['candidates'][0]['name']\n self.place_id = self.data['candidates'][0]['place_id']\n self.lat = self.data['candidates'][0]['geometry']['location']['lat']\n self.lng = self.data['candidates'][0]['geometry']['location']['lng']\n print(self.lat, self.lng, self.place_id)\n return (self.lat, self.lng, self.place_id)",
"def get_user_location():\r\n \r\n # API endpoint\r\n url = 'http://ip-api.com/json/'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n # Return data gathered\r\n if data['status'] == 'success':\r\n return {\r\n 'success': data['status'] == 'success', # Should exaluate to True\r\n 'city': data['city'],\r\n 'state': data['regionName'],\r\n 'ip_coordinates': str(data['lat']) + ', ' + str(data['lon']),\r\n 'lat': data['lat'],\r\n 'lon': data['lon'],\r\n 'ip_address': data['query']\r\n }\r\n else:\r\n return {\r\n 'success': data['status'] == 'success', # Should exaluate to False\r\n 'ip_address': data['query']\r\n }",
"def ctakeout_viewer():\r\n name = request.args[\"address\"]\r\n takeouts = get_zipcode_takeouts(name)\r\n\r\n if len(takeouts) > 0:\r\n takeouts['coordinate'] = 'end_point='+takeouts['name'].astype(str)+'&'+'end_lng=' + takeouts['lon'].astype(str)+'&'+'end_lat='+takeouts['lat'].astype(str)\r\n\r\n #genetrate folium map\r\n takeout_coordinates = takeouts[[\"lat\", \"lon\"]].values.tolist()\r\n\r\n map=make_folium_map(takeout_coordinates)\r\n\r\n\r\n # generate interactive map\r\n\r\n return render_template(\r\n \"page3_4t.html\",\r\n num_takeouts=get_num_takeouts(name),\r\n address=name,\r\n takeouts=takeouts[[\"name\", \"address\", 'coordinate']].values,\r\n map=map._repr_html_()\r\n )\r\n\r\n else:\r\n lng=get_address(name)[1]\r\n lat=get_address(name)[0]\r\n near_takeouts = find_5near_takeouts(lng, lat)\r\n near_takeouts['coordinate'] = 'end_point='+near_takeouts['name'].astype(str)+'&'+'end_lng=' + near_takeouts['lon'].astype(str)+'&'+'end_lat='+near_takeouts['lat'].astype(str)\r\n\r\n return render_template(\r\n \"page3_4t_notakeout.html\",\r\n address=name,\r\n near_takeout_table=near_takeouts[[\"name\", \"address\", \"coordinate\", \"distance\"]].values)",
"def __init__(self, place):\n self.place = place\n self.cleanplace = parser(self.place)\n self.key = googlemaps.Client(key=os.environ.get(\"GMAP_KEY\") or GMAP_KEY)\n self.response = self.search()\n self.latitude = self.response[0][\"geometry\"][\"location\"][\"lat\"]\n self.longitude = self.response[0][\"geometry\"][\"location\"][\"lng\"]\n self.address = self.response[0][\"formatted_address\"]\n self.wiki = self.response[0][\"address_components\"][1][\"long_name\"]",
"def get_lat_long(place_name):\n latitude = place_name[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n longitude = place_name[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n return latitude, longitude",
"def GetUserLocationView(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_user_location(return_type=None):\n\n gmaps = googlemaps.Client(key=GooglemapsService.api_key)\n json_response = gmaps.geolocate()\n user_location_coords = \"%s,%s\" % (json_response['location']['lat'], json_response['location']['lng'])\n\n if return_type is None:\n return_type = \"coords\"\n\n if return_type == \"address\":\n return GooglemapsService.get_reverse_geocode_result(user_location_coords)\n else:\n return user_location_coords",
"def show_place(locid):\n t0 = time.time()\n u_context = UserContext(user_session, current_user, request)\n try:\n # Open database connection and start transaction\n # readservice -> Tietokantapalvelu\n # reader ~= Toimialametodit\n\n with PlaceReader(\"read\", u_context) as service:\n # reader = PlaceReader(readservice, u_context)\n res = service.get_places_w_events(locid)\n\n if res[\"status\"] == Status.NOT_FOUND:\n print(f'bp.scene.routes.show_place: {_(\"Place not found\")}')\n # return redirect(url_for('virhesivu', code=1, text=f'Ei löytynyt yhtään'))\n if res[\"status\"] != Status.OK:\n print(\n f'bp.scene.routes.show_place: {_(\"Place not found\")}: {res.get(\"statustext\")}'\n )\n # return redirect(url_for('virhesivu', code=1, text=f'Virhetilanne'))\n\n except KeyError as e:\n traceback.print_exc()\n return redirect(url_for(\"virhesivu\", code=1, text=str(e)))\n\n cnt = len(res.get(\"events\")) if res.get(\"events\", False) else 0\n stk_logger(u_context, f\"-> bp.scene.routes.show_place n={cnt}\")\n return render_template(\n \"/scene/place_events.html\",\n place=res.get(\"place\"),\n pl_hierarchy=res.get(\"hierarchy\"),\n events=res.get(\"events\"),\n user_context=u_context,\n elapsed=time.time() - t0,\n )",
"async def snapshot(self, msg, *args):\n if not Guard.has_permission(msg, 'attach_files'):\n await msg.channel.send(**{\n 'content': 'Cannot send images on this channel',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n if not args:\n return\n args = list(args)\n if args[0] == 'world':\n include_world = True\n args.pop(0)\n else:\n include_world = False\n if args and args[0] == 'marker':\n show_marker = True\n args.pop(0)\n else:\n show_marker = False\n try:\n if len(args) == 2:\n lat, lng = map(float, args)\n zoom = 0\n elif len(args) == 3:\n lat, lng, zoom = map(float, args)\n else:\n return\n except:\n return\n if show_marker:\n map_controller = MapController(lat, lng, zoom, mlat=lat, mlng=lng)\n else:\n map_controller = MapController(lat, lng, zoom)\n if not map_controller.is_valid():\n await msg.channel.send(**{\n 'content': f'Invalid location {lat} {lng} {zoom}',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })\n return\n image = await map_controller.generate_snapshot(include_world=include_world)\n snapshot_id = map_controller.get_id().replace('_', ', ').replace('m', '')\n location_str = f'center at -{snapshot_id}'\n content = f'Here is a snapshot of that location ({location_str}).'\n await msg.channel.send(**{\n 'content': content,\n 'file': discord.File(image, filename=f'snapshot_{map_controller.get_id()}.png'),\n 'reference': msg.to_reference(),\n 'mention_author': True,\n })",
"def find_parking_spot(lat_u, long_u, timestamp):\n\n coord_u = project_long_lag_coord_into_cartesian([[lat_u, long_u]])\n xu, yu = coord_u[0][0], coord_u[0][1]\n user_point = Point(xu, yu)\n\n # Create my list of polygons from json\n json_data = cast_json_into_list()\n polygons_lat_long_coord = json_coordinates(json_data)\n\n # project lat-long to a plan\n polygons_cartesians_coord = [project_long_lag_coord_into_cartesian(_) for _ in polygons_lat_long_coord]\n\n # creates a list of polygons\n polygons_list = np.array([Polygon(_) for _ in polygons_cartesians_coord])\n\n # list of distances\n distances_list = np.array([distance_user_point_to_polygons(user_point, polygons_list)])\n distances_list_scaled = np.array([np.round(SCALING_FACTOR*elt, 0).astype(int) for elt in distances_list]).ravel()\n\n # Calls the probability\n probas = calculate_probs(timestamp)\n probas_display = calculate_probs(timestamp)\n print(probas)\n\n # Gets the names of the places\n places_name = json_names(json_data)\n\n # Combined metric\n #probas_s = (probas - np.array(probas).mean()) / np.array(probas).std()\n #distances_list_scaled_s = (distances_list_scaled - distances_list_scaled.mean()) / distances_list_scaled.std()\n #print(probas_s)\n #print(distances_list_scaled_s)\n metrics = np.array([round(10**5*probas[i]**4/distances_list_scaled[i],2) for i in range(len(places_name))])\n\n #print(places_name)\n #print(distances_list_scaled.ravel())\n #print(probas)\n\n # Creates the dataframe\n df = pd.DataFrame.from_dict(data={'place': places_name, 'distance': distances_list_scaled.ravel(), 'chance': probas_display, 'score': metrics})\n df = df.sort_values('score', ascending=False)\n\n json_table = df.to_json(orient=\"split\")\n print(json_table)\n return json_table",
"def getting_user_location():\n\n geoplugin_request = requests.get(\"http://www.geoplugin.net/json.gp\")\n\n if geoplugin_request.status_code != 200:\n print(\"It was not possible to obtain your current location. Please, try again later!\")\n exit()\n \n else:\n geo_plugin_response = geoplugin_request.json()\n latitude = geo_plugin_response['geoplugin_latitude']\n longitude = geo_plugin_response['geoplugin_longitude']\n\n user_location_info = tuple([latitude, longitude])\n\n return user_location_info",
"def carslocation():\n # Check if user is loggedin\n if 'loggedin' in session:\n\n response = requests.get(\"http://localhost:8080/api/carslocation\")\n print(response.text)\n locations = json.loads(response.text)\n\n # users is loggedin show them the home page\n return render_template('map.html', location=locations)\n # return render_template('map.html')\n # users is not loggedin redirect to login page\n return redirect(url_for('site.login'))",
"def updateCoordinates():\n\n lat = request.args.get(\"lat\")\n lng = request.args.get(\"lng\")\n\n print(\"New coordinates\")\n print(\"Latitude: \" + lat)\n print(\"Longitude: \" + lng)\n\n if session[\"user_type\"] == \"Operator\":\n # for testing purposes, we'll record all position changes\n db.execute(\"\"\"INSERT OR REPLACE\n INTO active_trucks (\n lat, lng, operatorid ) \n VALUES (?,?,?);\"\"\", \\\n (lat,lng,session[\"user_id\"],))\n conn.commit()\n\n return \"True\"",
"def google_places_detail(self, job):\n reference = job.get('reference', None)\n if not reference:\n print \"No reference found\"\n return\n\n url = \"https://maps.googleapis.com/maps/api/place/details/json\"\n args = {\n 'key': GOOGLE_API_KEY,\n 'sensor': 'false',\n 'reference': reference,\n }\n\n arg_list = []\n for k,v in args.items():\n arg_list.append(\"%s=%s\" % (k, v))\n\n url = \"%s?%s\" % (url, '&'.join(arg_list))\n\n r = requests.get(url)\n data = r.json()\n result = data['result']\n\n id = result['id']\n self.r.set(\"%s:%s\" % (REDIS_GOOGLE_DATA, id), json.dumps(data))\n print \"Stored: %s\" % id",
"def test_url_helper_coordinates():\n urlh = URLHelper()\n args = []\n kwargs = {\"coordinates\": \"210.80242917 54.348753\"}\n url = urlh.build_url(*args, **kwargs)\n assert url == \"https://archive.gemini.edu/jsonsummary/notengineering/NotFail/ra=210.802429/dec=54.348753\"",
"def locate(self):\n # TODO: The random location generation should only occur on a city-wide level, and should be optional\n\n # If a set of coordinates does exist return them\n if self.data['coordinates'] is not None:\n my_location = self.data['coordinates']['coordinates']\n return {'tweet_location': '`'.join(list(map(lambda x: str(x), my_location)))}\n # If they don't exist, but place does exist, look for a coordinates bounding box\n elif self.data['place'] is not None and self.data['place']['bounding_box']['coordinates']:\n boxes = self.data['place']['bounding_box']['coordinates'][0]\n my_lat = [boxes[0][1], boxes[1][1]]\n my_long = [boxes[0][0], boxes[2][0]]\n my_lat_range = random.randint(int(my_lat[0] * 100000), int(my_lat[1] * 100000)) / 100000\n my_long_range = random.randint(int(my_long[0] * 100000), int(my_long[1] * 100000)) / 100000\n return {'tweet_location': str(my_long_range) + '`' + str(my_lat_range)}\n # If the above fails, just return None\n else:\n return {'tweet_location': 'None'}",
"def get_place():\n zoom = 21\n place = request.form.get(\"place\")\n file_name = f'data/coordinates/coord_{place}_segmentation'\n tiles = load_pickle_file(file_name)\n zoom_factor = 2**21 / 2**zoom\n picHeight = 600 / zoom_factor # Resulting image height in pixels (x2 if scale parameter is set to 2)\n picWidth = 600 / zoom_factor\n\n xScale = math.pow(2, zoom) / (picWidth/256)\n yScale = math.pow(2, zoom) / (picHeight/256)\n total_tiles_sp = 0\n total_count_sp = 0\n total_sp_area = 0\n for i, tile in enumerate(tiles):\n tile['filename'] = f\"s3://solarnet-data/{tile['file_name']}\"\n if \"mask_url\" not in tile:\n tile['mask_url'] = \"\"\n else:\n tmp_url = tile['mask_url'].replace(\"img/\", \"\")\n tile['mask_url'] = f\"https://solarnet-data.s3.us-west-2.amazonaws.com/{tmp_url}\"\n tile['bounds'] = ts_imgutil.getImageBounds(tile['w'], tile['h'], xScale, yScale, tile['lat'], tile['lng'])\n if \"panels_area\" in tile:\n total_sp_area += tile[\"panels_area\"]\n if \"panels_count\" in tile:\n total_count_sp += tile[\"panels_count\"]\n if \"prediction\" in tile and int(tile[\"prediction\"]) == 1:\n total_tiles_sp += 1\n return json.dumps([tiles, total_tiles_sp, total_count_sp, round(total_sp_area, 2), len(tiles), place])",
"def success_geo(lat,lng):\n return (lng,lat)",
"def show_places():\n t0 = time.time()\n print(f\"--- {request}\")\n print(f\"--- {user_session}\")\n # Set context by owner and the data selections\n u_context = UserContext(user_session, current_user, request)\n # Which range of data is shown\n u_context.set_scope_from_request(request, \"place_scope\")\n u_context.count = request.args.get(\"c\", 50, type=int)\n\n with PlaceReader(\"read\", u_context) as service:\n # reader = PlaceReader(readservice, u_context)\n # The 'items' list has Place objects, which include also the lists of\n # nearest upper and lower Places as place[i].upper[] and place[i].lower[]\n res = service.get_place_list()\n\n if res[\"status\"] == Status.NOT_FOUND:\n print(f'bp.scene.routes.show_places: {_(\"No places found\")}')\n elif res[\"status\"] != Status.OK:\n print(\n f'bp.scene.routes.show_places: {_(\"Could not get places\")}: {res.get(\"statustext\")}'\n )\n\n elapsed = time.time() - t0\n stk_logger(\n u_context,\n f\"-> bp.scene.routes.show_places n={len(res.get('items'))} e={elapsed:.3f}\",\n )\n return render_template(\n \"/scene/places.html\",\n places=res[\"items\"],\n menuno=4,\n user_context=u_context,\n elapsed=elapsed,\n )",
"def place_by_name(place, API_KEY=API_KEY, FIND_PLACE_API_URL=FIND_PLACE_API_URL):\n params = {\n 'input': '{}'.format(place),\n 'fields':'name,geometry,formatted_address',\n 'inputtype':'textquery',\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(FIND_PLACE_API_URL, params=params)\n\n response = response.json()['candidates'][0]\n\n geodata = dict()\n geodata['lat'] = response['geometry']['location']['lat']\n geodata['lng'] = response['geometry']['location']['lng']\n geodata['address'] = response['formatted_address']\n\n return geodata",
"def getAddress(user):",
"def get_lat_long(place_name):\n place = place_name.replace(' ', '%20')\n url = '{}?key={}&location={}'.format(MAPQUEST_BASE_URL, MAPQUEST_API_KEY,place)\n # print(url)\n place_json = get_json(url)\n lat = place_json[\"results\"][0][\"locations\"][0][\"latLng\"][\"lat\"]\n lng = place_json[\"results\"][0][\"locations\"][0][\"latLng\"][\"lng\"]\n return lat, lng"
]
| [
"0.5741757",
"0.5636716",
"0.5636548",
"0.56217134",
"0.53884745",
"0.53340495",
"0.5326232",
"0.53156716",
"0.52914727",
"0.52897",
"0.5223609",
"0.5215813",
"0.52116656",
"0.52053446",
"0.5197685",
"0.5193562",
"0.5173581",
"0.51653576",
"0.5164704",
"0.5133908",
"0.51150113",
"0.51023114",
"0.5094767",
"0.5089322",
"0.506533",
"0.50494957",
"0.5042588",
"0.50405246",
"0.5033995",
"0.5027723"
]
| 0.59125996 | 0 |
Replies to the user with the distance between the two place names mentioned | async def distance(self, msg, place1=None, place2=None, *args):
if not place1 or not place2:
return
try:
if place1.lower() not in MapController.locations:
raise ValueError(place1)
if place2.lower() not in MapController.locations:
raise ValueError(place2)
except ValueError as e:
await msg.channel.send(**{
'content': f'There is no location named `{e.args[0]}`',
'reference': msg.to_reference(),
'mention_author': True,
'delete_after': 3,
})
return
lat1, lng1, _ = MapController.locations[place1.lower()]
lat2, lng2, _ = MapController.locations[place2.lower()]
distance = ((lat1-lat2)**2 + (lng1-lng2)**2)**0.5
content = f'The distance between {place1} ({lat1}, {lng1}) and {place2} ({lat2}, {lng2}) is {distance:.0f}km.'
await msg.channel.send(**{
'content': content,
'reference': msg.to_reference(),
'mention_author': True,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance():\n return str(us.get_distance())",
"def edit_distance(str1, str2):\r\n pass",
"def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5",
"def get_distance_metres(aLocation1, aLocation2):\n [dNorth, dEast, dDown] = get_position_error(aLocation1, aLocation2)\n \n return math.sqrt((dNorth*dNorth) + (dEast*dEast))",
"def test_get_distance_to_same_place() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n )\n\n assert meters == 0",
"def calculate_distance(coords1, coords2):# 3 sets of double quotes allows to give a description for help\n distance_x = coords1[0] - coords2[0]\n distance_y = coords1[1] - coords2[1]\n distance_z = coords1[2] - coords2[2]\n distance = numpy.sqrt(distance_x**2 + distance_y**2 + distance_z**2)\n return distance",
"def get_distance(user_id1: str, user_id2: str) -> float:\n features1 = get_feature_vector(user_id1)\n features2 = get_feature_vector(user_id2)\n pass",
"def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance",
"def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2",
"def distance(self, a, b):\n \n # -----------------------------\n # Your code\n '''R = 3963 # radius of Earth (miles)\n lat1, lon1 = math.radians(a[0]), math.radians(a[1])\n lat2, lon2 = math.radians(b[0]), math.radians(b[1])\n \n return math.acos(math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R*0.000621371'''\n return abs(a[0] - b[0]) + abs(a[1] - b[1])\n \n \n # -----------------------------",
"def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm",
"def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm",
"def get_distance(self, name1, name2):\n ind1 = self.leaves.index(name1)\n ind2 = self.leaves.index(name2)\n return self.orig_dists[ind1, ind2]",
"def test_get_distance(self):\n meters = location_util.distance(COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1])\n self.assertAlmostEqual(meters / 1000, DISTANCE_KM, places=2)",
"def calc_distance(user_loc, space):\n geocode_result = gmaps.geocode(space['_location'])\n dest_loc = geocode_result[0]['geometry']['location']\n direction = gmaps.distance_matrix(user_loc, dest_loc, mode=\"walking\")\n distance = direction['rows'][0]['elements'][0]['distance']['value']\n # convert to mile\n distance = distance * 0.000621371\n return distance",
"def _calcPlaceDist(self, uRelief, uArea, dbRelief, dbArea):\n\n # overlap of the two ellipses\n sumArr = uRelief + dbRelief\n overlap = (sumArr == 2).sum()\n\n placeDist = 1 - (2 * overlap) / (uArea + dbArea)\n return placeDist",
"def get_distance_meters(location1, location2):\n dlat = location2.lat - location1.lat\n dlong = location2.lon - location1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01",
"def get_distance_meters(aLocation1, aLocation2):\n dlat = aLocation2.lat - aLocation1.lat\n dlong = aLocation2.lon - aLocation1.lon\n return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5",
"def getDist(lat1,long1,lat2,long2):\n\tlat1 = math.radians(lat1)\n\tlong1 = math.radians(long1)\n\tlat2 = math.radians(lat2)\n\tlong2 = math.radians(long2)\n\tR = 6371 # km\n\td = cmath.acos(cmath.sin(lat1) * cmath.sin(lat2) + \\\n\tcmath.cos(lat1) * cmath.cos(lat2) *\n\tcmath.cos(long2 - long1)) * R\n\treturn abs(d) # cast to float",
"def distance_between(pt1: tuple, pt2: tuple) -> float:\r\n\r\n return ((pt2[1] - pt1[1])**2 + (pt2[0] - pt1[0])**2)**0.5",
"def get_distance(self, resp1, resp2):\n feed_dict = {self.anchor: resp1}\n embed1 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n feed_dict = {self.anchor: resp2}\n embed2 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n return np.sqrt(np.sum((embed1-embed2)**2, 1))",
"def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)",
"def dist(pos1, pos2):\n a, b = pos1\n c, d = pos2\n \n return sqrt((a-c)**2 + (b-d)**2)",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def gram_edit_distance(self, gram1, gram2):\r\n distance = 0.0\r\n if gram1 == gram2:\r\n distance = 1.0\r\n return distance",
"def measure_distance(cell1, cell2):\n\n\tx1, y1 = cell1.location\n\tx2, y2 = cell2.location\n\tx_dist = abs(x1-x2)\n\ty_dist = abs(y1-y2)\n\n\tif x_dist > 5:\n\t\tx_dist = 10-x_dist\n\tif y_dist > 5:\n\t\ty_dist = 10-y_dist\n\n\treturn (x_dist**2 + y_dist**2)**.5",
"def distance(point_1, point_2, units=1):\n\n distance = (((point_2[0]-point_1[0])*units)**2.0\n + ((point_2[1]-point_1[1])*units)**2.0\n + ((point_2[2]-point_1[2])*units)**2.0)**0.5\n \n return distance",
"def distance(a, b):\n return vincenty((float(a.longitude), float(a.latitude)),\n (float(b.longitude), float(b.latitude))).km",
"def get_distance(lat1, long1, lat2, long2):\n x = 69.1*(lat2 - lat1)\n y = 69.1*(long2 - long1) * math.cos(lat1/57.3)\n dist = math.sqrt(x*x + y*y)\n return dist"
]
| [
"0.69901127",
"0.6892306",
"0.64907265",
"0.6486312",
"0.6462533",
"0.6446145",
"0.6441033",
"0.6402432",
"0.6395236",
"0.6380346",
"0.63785714",
"0.63785714",
"0.63747483",
"0.633181",
"0.6319796",
"0.6289507",
"0.62639916",
"0.62585694",
"0.62468517",
"0.6234138",
"0.6226197",
"0.6223012",
"0.6222588",
"0.6217585",
"0.62146807",
"0.62103045",
"0.6208052",
"0.61977166",
"0.61966807",
"0.6188591"
]
| 0.69919837 | 0 |
Sets the activity of the bot | async def set_activity(self, msg, activity=None, *args):
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=activity)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_activity(self, status):\n self._activity = status",
"async def activity(self, ctx:utils.Context, activity_type:str, *, name:str=None):\n\n if name:\n activity = discord.Activity(name=name, type=getattr(discord.ActivityType, activity_type.lower()))\n else:\n await self.bot.set_default_presence()\n return\n await self.bot.change_presence(activity=activity, status=self.bot.guilds[0].me.status)",
"def activity(self, activity):\n if activity is None:\n raise ValueError(\"Invalid value for `activity`, must not be `None`\") # noqa: E501\n\n self._activity = activity",
"def set_current_activity(client, activity_label):\n\n id = activities_by_name[activity_label]\n func = client.start_activity(id)\n status = run_in_loop_now('start_activity', func)\n return status",
"async def activity(self, ctx, activity_type: str.lower, *, message: str = \"\"):\n if activity_type == \"clear\":\n self.bot.config.remove(\"activity_type\")\n self.bot.config.remove(\"activity_message\")\n await self.bot.config.update()\n await self.set_presence()\n embed = Embed(title=\"Activity Removed\", color=self.bot.main_color)\n return await ctx.send(embed=embed)\n\n if not message:\n raise commands.MissingRequiredArgument(SimpleNamespace(name=\"message\"))\n\n activity, msg = (\n await self.set_presence(\n activity_identifier=activity_type,\n activity_by_key=True,\n activity_message=message,\n )\n )[\"activity\"]\n if activity is None:\n raise commands.MissingRequiredArgument(SimpleNamespace(name=\"activity\"))\n\n self.bot.config[\"activity_type\"] = activity.type.value\n self.bot.config[\"activity_message\"] = message\n await self.bot.config.update()\n\n embed = Embed(\n title=\"Activity Changed\", description=msg, color=self.bot.main_color\n )\n return await ctx.send(embed=embed)",
"def setactivity(self, activity: Optional[ba.Activity]) -> None:\n\n self._activity = None if activity is None else weakref.ref(activity)\n\n # Load our media into this activity's context.\n if activity is not None:\n if activity.expired:\n print_error('unexpected finalized activity')\n else:\n with _ba.Context(activity):\n self._load_activity_media()",
"def set_activity(self, activity_name: str, activity_timestamp: datetime) -> None:\n activity = Activity(activity_name, activity_timestamp)\n self.activities.append(activity)",
"def activities(self, activities):\n \n self._activities = activities",
"def activity(self, activity):\n allowed_values = [\"PICKUP\", \"DROPOFF\", \"EXECUTE\", \"BREAK\"] # noqa: E501\n if activity not in allowed_values:\n raise ValueError(\n \"Invalid value for `activity` ({0}), must be one of {1}\" # noqa: E501\n .format(activity, allowed_values)\n )\n\n self._activity = activity",
"def update_activity():\n pass",
"def activities(self, activities):\n\n self._activities = activities",
"async def schedule_activity():\n if controller.scheduled_activity_date is not None:\n return\n controller.scheduled_activity_date = datetime.now()+timedelta(seconds=30)\n await wait_until(controller.scheduled_activity_date)\n await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='for ~command'))",
"async def change_presence(self, **kwargs):\n try:\n presence = kwargs.pop('status')\n await self.set_presence(presence)\n except KeyError:\n pass\n\n try:\n status = kwargs.pop('activity')\n await self.set_status(status)\n except KeyError:\n pass",
"def alter(self, instance, activity, **kwargs):\n return activity",
"async def on_message_activity(self, turn_context: TurnContext):\n TurnContext.remove_recipient_mention(turn_context.activity)\n turn_context.activity.text = turn_context.activity.text.strip()\n\n if not self.unlocked:\n await turn_context.send_activity(\"De bot is gedeactiveerd en kan dus niet gebruikt worden.\")\n return\n\n # Based on a given command, the bot performs a function.\n\n # Return all committees that are available at this moment\n if turn_context.activity.text == \"BeschikbareCommissies\":\n await self.available_committees(turn_context)\n return\n\n # Choose a committee for a mentor group to visit.\n if turn_context.activity.text.startswith(\"ChooseCommittee\"):\n await self.choose_committee(turn_context)\n return\n \n if turn_context.activity.text == \"RandomCommittee\":\n await self.random_committee(turn_context)\n return\n\n # When someone enrolls for a certain committee\n if turn_context.activity.text.startswith(\"Enroll\"):\n await self.enroll(turn_context)\n return\n\n if turn_context.activity.text == \"Vrijgeven\":\n await self.release_committee(turn_context)\n return\n \n if turn_context.activity.text == \"Inschrijfbalie\":\n await self.association_planning(turn_context)\n return\n \n if turn_context.activity.text == \"UpdateCard\":\n await self.update_card(turn_context)\n return\n\n # Get all intro members\n if turn_context.activity.text == \"Introleden\":\n await self.get_intro(turn_context)\n return\n \n # Save enrollments to google sheet\n if turn_context.activity.text == \"InschrijvingenOpslaan\":\n await self.save_enrollments(turn_context)\n return\n \n # Update inschrijfbalie planning with certain delay\n if turn_context.activity.text.startswith(\"UpdateInschrijfbalie\"):\n await self.update_association_planning(turn_context)\n return\n \n if turn_context.activity.text.startswith(\"VeranderCommissie\"):\n await self.switch_committee(turn_context)\n return\n \n if turn_context.activity.text.startswith(\"MentorVrijgeven\"):\n await self.release_mentor_group(turn_context)\n return\n \n if turn_context.activity.text == \"AllesVrijgeven\":\n await self.release_all(turn_context)\n return\n\n #TODO: what to send if it is not a command?\n await turn_context.send_activity(\"Ik ken dit commando niet. Misschien heb je een typfout gemaakt?\")\n return",
"def manipulate_activity():\n pass",
"async def change_presence(self, *, activity: Optional[Any] = ..., status: Optional[Any] = ..., afk: bool = ..., shard_id: Optional[Any] = ...):\n ...",
"async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username",
"def activity_process(self, activity_process):\n\n self._activity_process = activity_process",
"def set_actives(self, actives):\n name = 'setActives'\n data = {'actives': actives}\n\n self._send_websocket_request(name, data)",
"def activity_send(user, verb=None, object=None, target=None):\n if target is None:\n activity.send(user, verb=verb, object=object)\n video = object\n else:\n activity.send(user, verb=verb, object=object, target=target)\n if isinstance(target, Video):\n video = target\n else:\n video = object\n \n if isinstance(video, Video):\n # get video's users\n video_users = VideoUsers.objects.filter(video_id=video.id)\n \n # update their stats and send push notifications\n if verb == 'like':\n # get video's other users\n video_users = video_users.exclude(user_id = user.id)\n video_users.update(new_likes_count = F('new_likes_count') + 1)\n \n elif verb == 'add':\n video_users.update(new_clips_count = F('new_clips_count') + 1)\n \n elif verb == 'invite':\n pass",
"def set_turn(self):\n if self.status == self.PLAYER_TURN:\n return\n self.status = self.PLAYER_TURN\n self.client.send_player_turn(10)",
"def set_goal(self, goal):\r\n self.goal = goal\r\n self.start_time = self.get_current_time()",
"async def set_chat(self, args):\n value = args if isinstance(args, bool) else args.lower() in ('yes', 'true', '1')\n if self.chat == value:\n return\n self.chat = value\n if self.chat_message is not None:\n await self.delete_message(self.chat_message)\n await self.set_trigger('chat_init', None)\n await self.set_trigger('chat', None)\n tag = 'chat' if self.chat else 'chat_init'\n self.chat_message = await self.send_tag(tag, emoji.TRIGGERS[tag], 'Chat enabled' if self.chat else 'Chat muted')\n if not self.chat:\n await self.shell_terminate_all(self.shell_chat)",
"def set_conversation(self, conversation):\r\n self.conversation = conversation",
"def bot_type(self, bot_type):\n\n self._bot_type = bot_type",
"def set_goal(self, **kwargs):\n return self.env.set_goal(**kwargs)",
"def execute(self, activity, context):\n\n raise NotImplementedError",
"def write_activityMessage(self, value):\n self.update_attr_map(\"activityMessage\", value)",
"def noteActivity(): \r\n global lastActivity\r\n lastActivity = millis()"
]
| [
"0.76861185",
"0.70110416",
"0.6871109",
"0.6633408",
"0.6617505",
"0.65683097",
"0.6544981",
"0.64911014",
"0.64569134",
"0.6431248",
"0.63290405",
"0.6172366",
"0.6133315",
"0.59609354",
"0.59220105",
"0.5766587",
"0.57659775",
"0.5760408",
"0.5752067",
"0.5601439",
"0.5585608",
"0.55450714",
"0.5501435",
"0.5486907",
"0.54826844",
"0.54809576",
"0.54689413",
"0.5343027",
"0.5332886",
"0.5313753"
]
| 0.8018901 | 0 |
Send some status about the bots | async def status(self, msg, *args):
content = self.get_status()
await msg.channel.send(**{
'content': content,
'reference': msg.to_reference(),
'mention_author': True,
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def status(self, context):\n await self.send_message(context, await self.status_msg_packed(context))",
"def send_robot_status(self, robot_status):\n self.robot_status_sender.send(robot_status)",
"async def status(self, ctx: Context):\n # Get lines of code\n lines_of_code = os.popen(\n r'find . -path ./.venv -prune -false -o -name \"*.py\" -exec cat {} \\; | wc -l').read()\n\n # Get memory usage\n process = psutil.Process(os.getpid())\n memory_usage = process.memory_info().rss / 1024 ** 2\n\n await ctx.send(\n embed=discord.Embed(\n title=f'{self.bot.user.name} Status',\n colour=self.bot.user.colour\n ).set_thumbnail(\n url=self.bot.user.avatar_url\n ).add_field(\n name='Users:', value=len(self.bot.users)\n ).add_field(\n name='Guilds:', value=len(self.bot.guilds)\n ).add_field(\n name='Started at:', value=format_dt(self.bot._start_time)\n ).add_field(\n name='Memory usage:', value=f'{memory_usage:.2f} MB'\n ).add_field(\n name='Cogs loaded:', value=len(self.bot.cogs)\n ).add_field(\n name='Lines of code:', value=lines_of_code or 'Unknown'\n ).add_field(\n name='Quick links:',\n value='[Source Code](https://github.com/bijij/Silvally)',\n inline=False\n )\n )",
"async def status(self, ctx:utils.Context, status:str):\n\n status_o = getattr(discord.Status, status.lower())\n await self.bot.change_presence(activity=self.bot.guilds[0].me.activity, status=status_o)",
"def on_status(update, context):\n current_state = context.user_data[\"state\"]\n current_request = context.user_data.get(\"current_request\", None)\n message = f\"State: {current_state}\\nRequest: {current_request}\"\n\n context.bot.send_message(chat_id=update.message.chat_id, text=message)",
"async def status(self, ctx, *, status=None):\n # [p]set status <status>\n\n statuses = {\n \"online\": discord.Status.online,\n \"idle\": discord.Status.idle,\n \"dnd\": discord.Status.dnd,\n \"invisible\": discord.Status.invisible\n }\n\n server = ctx.message.server\n\n current_game = server.me.game if server is not None else None\n\n if status is None:\n await self.bot.change_presence(status=discord.Status.online,\n game=current_game)\n await self.bot.say(\"Status reset.\")\n else:\n status = statuses.get(status.lower(), None)\n if status:\n await self.bot.change_presence(status=status,\n game=current_game)\n await self.bot.say(\"Status changed.\")\n else:\n await send_command_help(ctx)",
"async def status(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n up_time = time.time() - self.start_time\n m, s = divmod(up_time, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n stime = time.time() - psutil.boot_time()\n m, s = divmod(stime, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n system_uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n mem = psutil.virtual_memory()\n\n pid = os.getpid()\n memory_use = psutil.Process(pid).memory_info()[0]\n\n content = discord.Embed(title=f\"Miso Bot | version {main.version}\")\n content.set_thumbnail(url=self.client.user.avatar_url)\n\n content.add_field(name=\"Bot process uptime\", value=uptime_string)\n content.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent()}%\")\n content.add_field(name=\"System uptime\", value=system_uptime_string)\n\n content.add_field(name=\"System RAM Usage\", value=f\"{mem.percent}%\")\n content.add_field(name=\"Bot memory usage\", value=f\"{memory_use/math.pow(1024, 2):.2f}MB\")\n\n await ctx.send(embed=content)",
"def status(message):\n message.reply('User_id: ' +\n str(message._client.users[message._get_user_id()]))",
"def status(message):\n message.reply('User_id: ' +\n str(message._client.users[message._get_user_id()]))",
"def send_game_status(ok, msg, client_key, from_name, send_message_func ):\n\n status_type = messageActions.Action_status.TYPE_GAME\n\n new_client_message = message.Message( client_key, 's' )\n new_message = new_client_message.new_message( from_name, status_type, ok, msg )\n new_client_message.message = new_message\n new_client_message.to_clients = [ client_key ]\n\n send_message_func( new_client_message )",
"def SendStatus(self, body):\n\n status = {\"status\" : {\"body\" : unicode(body, \"utf-8\")}}\n self.__PostJson(\"/statuses\", status)",
"async def getstatuses(self, ctx):\n final_list = \"\"\n statuses = await ex.get_bot_statuses()\n if statuses is not None:\n for status in await ex.get_bot_statuses():\n final_list += f\"{status[0]}\\n\"\n else:\n final_list = \"None\"\n embed = discord.Embed(title=\"Statuses\", description=final_list)\n await ctx.send(embed=embed)",
"async def tweepy_on_status(self, tweet):\n self.processed_tweets += 1\n if self.skip_tweet(tweet):\n return\n\n chan_conf = dutils.get(self.conf.follows, id=tweet.author.id_str)\n try:\n embed = await self.prepare_embed(tweet)\n content = None\n except:\n embed = None\n content = 'Failed to prepare embed for ' + tweet.tweet_web_url # If the preparation failed before setting tweet.tweet_web_url imma kms\n log.error('Failed to prepare embed for ' + str(tweet._json))\n\n # Make sure we're ready to send messages\n await self.bot.wait_until_ready()\n\n for channel in chan_conf.discord_channels:\n discord_channel = self.bot.get_channel(channel.id)\n\n # Check if the channel still exists\n if discord_channel is None:\n log.error('Channel {} unavailable to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n continue\n\n # Check for required permissions\n perms = discord_channel.permissions_for(discord_channel.server.me)\n if not perms.embed_links:\n log.warning('Improper permissions in channel {} to display tweet {}.'.format(discord_channel.id, tweet.id_str))\n try:\n warning = '\\N{WARNING SIGN} Missed tweet from {} : `Embed links` permission missing. \\N{WARNING SIGN}'.format(tweet.author.screen_name)\n await self.bot.send_message(discord_channel, warning)\n except discord.DiscordException as e:\n log.error('Could not send warning to channel {}.\\n{}'.format(discord_channel.id, e))\n continue\n\n # Send the embed to the appropriate channel\n log.debug('Scheduling Discord message on channel ({}) : {}'.format(channel.id, tweet.text))\n await self.bot.send_message(discord_channel, content=content, embed=embed)\n\n # Update stats and latest id when processing newer tweets\n if tweet.id > chan_conf.latest_received:\n channel.received_count += 1\n chan_conf.latest_received = tweet.id\n self.conf.save()",
"async def twitter_status(self, ctx):\n server_channels = set(c.id for c in ctx.message.server.channels)\n\n followed_count = 0\n displayed_count = 0\n for chan_conf in self.conf.follows:\n # Check if this channel is displayed in the server\n if set(c.id for c in chan_conf.discord_channels) & server_channels:\n followed_count += 1\n displayed_count += sum(c.received_count for c in chan_conf.discord_channels if c.id in server_channels)\n\n # Calculate the average tweets processed per minute\n minutes = (time.time() - self.bot.start_time) / 60\n processed_average = self.processed_tweets / minutes\n processed_average = '< 1' if processed_average < 1 else round(processed_average)\n tweets_processed = '{} (avg {} / min)'.format(self.processed_tweets, processed_average)\n\n # Display the info\n if self.stream.running:\n embed = discord.Embed(title='Stream status', description='Online', colour=0x00ff00)\n else:\n embed = discord.Embed(title='Stream status', description='Offline', colour=0xff0000)\n embed.add_field(name='Tweets processed since startup', value=tweets_processed, inline=False)\n embed.add_field(name='Channels followed', value=followed_count)\n embed.add_field(name='Tweets displayed', value=displayed_count)\n\n await self.bot.say(embed=embed)",
"async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)",
"def __bot_info(self):\n log.debug(\"Displaying __bot_info\")\n self.bot.send_message(self.chat.id, self.loc.get(\"bot_info\"))",
"async def status(self):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\"])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))",
"async def report(self, ctx):\n try:\n members = self.bot.get_all_members()\n online, offline, other = 0,0,0\n for member in members:\n if member.status.online:\n online += 1\n elif member.status.offline:\n offline += 1\n else:\n other += 1\n message = discord.Embed(title='Server report',type='rich', colour=discord.Color(0xffb6c1))\n message.add_field(name='Online',value='**{}** online members'.format(online))\n message.add_field(name='Offline',value='**{}** offline members'.format(offline))\n message.add_field(name='Other',value='**{}** other members'.format(other))\n await self.bot.say(embed=message)\n\n except Exception as error:\n await self.bot.say('The report has failed !')\n self.logger.error(error)",
"async def botinfo(self, ctx):\n\n dev = await self.bot.fetch_user(170506717140877312)\n\n start = perf_counter()\n status_msg = await ctx.send('Beregner ping...')\n end = perf_counter()\n ping = int((end - start) * 1000)\n\n now = time()\n diff = int(now - self.bot.uptime)\n days, remainder = divmod(diff, 24 * 60 * 60)\n hours, remainder = divmod(remainder, 60 * 60)\n minutes, seconds = divmod(remainder, 60)\n\n process = Process(getpid())\n memory_usage = round(process.memory_info().rss / 1000000, 1)\n cpu_percent = process.cpu_percent()\n\n total_members = []\n online_members = []\n idle_members = []\n dnd_members = []\n offline_members = []\n for guild in self.bot.guilds:\n for member in guild.members:\n if member.id in total_members:\n continue\n total_members.append(member.id)\n if str(member.status) == 'online':\n online_members.append(member.id)\n elif str(member.status) == 'idle':\n idle_members.append(member.id)\n elif str(member.status) == 'dnd':\n dnd_members.append(member.id)\n elif str(member.status) == 'offline':\n offline_members.append(member.id)\n\n embed = discord.Embed(color=ctx.me.color, url=self.bot.misc['website'])\n embed.set_author(name=dev.name, icon_url=dev.avatar_url)\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n embed.add_field(name='Dev', value=f'{dev.mention}\\n{dev.name}#{dev.discriminator}')\n embed.add_field(name='Oppetid', value=f'{days}d {hours}t {minutes}m {seconds}s')\n embed.add_field(name='Ping', value=f'Ekte ping: {ping} ms\\nWebsocket ping: {int(self.bot.latency * 1000)} ms')\n embed.add_field(name='Servere', value=len(self.bot.guilds))\n embed.add_field(name='Discord.py', value=discord.__version__)\n embed.add_field(name='Python', value=platform.python_version())\n embed.add_field(name='Ressursbruk', value=f'RAM: {memory_usage} MB\\nCPU: {cpu_percent}%')\n embed.add_field(name='Maskin', value=f'{platform.system()} {platform.release()}')\n embed.add_field(name=f'Brukere ({len(total_members)})',\n value=f'{self.bot.emoji[\"online\"]}{len(online_members)} ' +\n f'{self.bot.emoji[\"idle\"]}{len(idle_members)} ' +\n f'{self.bot.emoji[\"dnd\"]}{len(dnd_members)} ' +\n f'{self.bot.emoji[\"offline\"]}{len(offline_members)}')\n embed.add_field(name='Lenker', value='[Inviter](https://discordapp.com/oauth2/authorize?client_' +\n f'id={self.bot.user.id}&permissions=388174&scope=bot) ' +\n f'| [Nettside]({self.bot.misc[\"website\"]}) ' +\n f'| [Kildekode]({self.bot.misc[\"source_code\"]})')\n await Defaults.set_footer(ctx, embed)\n await status_msg.edit(embed=embed, content=None)",
"async def tod_status(self, ctx, *args):\n n = len(self.players)\n if n > 0:\n if n == 1:\n s = \"person\"\n else:\n s = \"people\"\n message = f\"A Truth or Dare game is currently taking place with {n} {s}!\"\n else:\n message = \"No Truth or Dare game is currently taking place.\"\n await ctx.send(message)",
"async def _bot(ctx):\n await ctx.send('Yes, the bot is cool.')",
"def show_loan_stats(self, update, context):\n\n # Send preliminary message\n msg = 'Some message...'\n self.send_str(msg, update, context)\n\n # Send pic\n self.sendPic('loans.png', update, context)",
"async def server_status(self, ctx, alias):\n if ctx.invoked_subcommand is None:\n alias = alias.lower()\n if alias not in self.key_data:\n await self.bot.send_message(ctx.message.author, \"We aren't tracking a server called {}\".format(alias))\n if (self.key_data == {}):\n await self.bot.say(\"Configure the key first bud\")\n return\n else:\n if not ctx.message.channel.is_private:\n await self.bot.send_message(ctx.message.author, \"Please only use `!server` in PMs with me.\")\n try:\n if alias not in self.key_data:\n await self.bot.send_message(ctx.message.author, \"No server by that alias.\")\n return\n status = await self.get_status(self.key_data[alias][\"key\"])\n message = self.embedMessage(status, alias)\n await self.bot.send_message(ctx.message.author, embed=message)\n except ErrorGettingStatus as e:\n await self.bot.send_message(ctx.message.author, \"Status unknown right now.\")\n print(\"Error getting status. Response code was \" + str(e.status))",
"async def __send_alarm(self, context: ContextTypes.DEFAULT_TYPE) -> None:\n if self.door_status.update_status():\n await context.bot.send_message(\n MESKOID,\n text=f\"🐙{self.door_status.last_line}\",\n )\n await context.bot.send_message(\n QKZKID,\n text=f\"🐙{self.door_status.last_line}\",\n )\n elif self.__verbose:\n await context.bot.send_message(\n context.job.chat_id,\n text=f\"🚀unedited - {self.door_status.last_edit}.\",\n )",
"def sendTweets(self):\n\n if self.__status_type == 'link':\n\n for index, item in self.list.iterrows():\n\n title = item['title']\n url = item['url']\n message = (url + \" \" + title)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'single_msg':\n\n message = (self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n elif self.__status_type == 'reply':\n\n for index, item in self.list.iterrows():\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n try:\n if self.__image == None:\n self.__api.update_status(status=message, in_reply_to_status_id=item['id'])\n else:\n self.__api.update_with_media(filename=self.__image, status=message,\n in_reply_to_status_id=item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"reply status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'at':\n\n for index, item in self.list.iterrows():\n\n try:\n\n message = (\".@\" + item['user'] + \" \" + self.__status)[0:140]\n\n if self.__image == None:\n self.__api.update_status(status=message)\n else:\n self.__api.update_with_media(filename=self.__image, status=message)\n\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n elif self.__status_type == 'rt':\n\n for index, item in self.list.iterrows():\n try:\n self.__api.retweet(item['id'])\n except KeyError:\n print(\"List does not include necessary column(s).\")\n print(\"at status type used when generating list based on Twitter search.\")\n print(\"Change search_on to twitter and create list.\")\n return\n\n else:\n print(\"Invalid status type. Change status type through configure_tweet method.\")\n\n return",
"def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)",
"def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)",
"def send_tweet(tweet_text):\n twitter.update_status(status=tweet_text)",
"def send_client_status( ok, msg, client_key, from_name, get_client_list_func, send_message_func, game=None ):\n status_type = messageActions.Action_status.TYPE_CLIENT\n\n new_client_message = message.Message( client_key, 's' )\n new_message = new_client_message.new_message( from_name, status_type, ok, msg )\n new_client_message.message = new_message\n new_client_message.to_clients = get_client_list_func( [ client_key ], game )\n\n send_message_func( new_client_message )",
"def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):\n print \"status changed for\",username"
]
| [
"0.7099396",
"0.7079799",
"0.6860571",
"0.6767609",
"0.673753",
"0.672401",
"0.6702105",
"0.6679856",
"0.6679856",
"0.6644952",
"0.66088927",
"0.6600472",
"0.6588556",
"0.65812016",
"0.6564508",
"0.6503747",
"0.64562637",
"0.6452571",
"0.6448351",
"0.6413562",
"0.64076763",
"0.6401488",
"0.6400741",
"0.6390619",
"0.63859797",
"0.6365825",
"0.6365825",
"0.6365825",
"0.6362556",
"0.6360589"
]
| 0.7468175 | 0 |
Add one record into the set. | def add(self, record):
self._hist_records[record.uid] = record | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))",
"def _add_record(self, datetime_, hash_):\n assert isinstance(datetime_, datetime)\n assert isinstance(hash_, str)\n record = {'datetime': datetime_, 'hash': hash_, 'artifacts': self.artifacts}\n self.logger.debug(f'Adding record: {record}')\n self.db_collection.update_one(self.query, {'$addToSet': {'records': record}})",
"def add(self, record):\n if record.name != 'consensus':\n self.members.append(record)",
"def add(self, item):\n self.update(set([item]))",
"def add(self, rec):\n rec['ts'] = datetime.now()\n self.collection.insert_one(rec)",
"def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False",
"def add(self, stock_record):\n if stock_record.symbol in self._records:\n raise StockRecordExistsError(stock_record.symbol)\n self._records[stock_record.symbol] = stock_record",
"def add_record(self, record: EventRecord) -> None:\n with self.session.begin() as session:\n session.add(record)",
"def add(self, record, db, name='', key='', index=None):\n self._establishIndexes(db)\n\n if key and name:\n raise ValueError, \"both name and key are supplied: name:%s, key:%s\" % (name, key)\n if name: key=name\n\n if index is None: index = self.count(db) # this works because the index starts at 0\n \n container = self._container(db)\n\n refsetTable = self._refsetTableFactory()\n row = refsetTable()\n row.containerlabel = self.name\n row.container = container\n row.element = record\n row.elementlabel = key# or record.getTableName()\n row.elementindex = index\n row = db.insertRow(row)\n\n return db.query(record.__class__).filter_by(id=record.id).one()",
"def insert(self, val):\n if val in self.record:\n return False\n \n self.record[val] = len(self.data)\n self.data.append(val)\n return True",
"def add_record(self, record):\n pass",
"def add_to_set(self, item, reload=True):\n index = len(self)\n item = self._prepare_item(index, item)\n data = self._field.item_field.to_mongo(self.__document__, item)\n\n qs = self._get_queryset()\n qs.update_one({'$addToSet': {self.__field_name__: data}})\n\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n\n self.__log__.append(SetAddToSet(value=item))\n\n if reload:\n self.reload()",
"def add_record(self, record):\n # Store the domain as the key, and the rest as value.\n new_key = \"{0},{1}\".format(record.get_domain(), record.get_record_type())\n self._records[new_key] = record",
"def add(self, data):\n if self._filter(data):\n id = self.db._generate_id(data)\n \n if not id == None:\n if self.db._store:\n self.db.append(id, str(data))\n print id, \"stored to\", self.db._generate_path(id)\n else:\n print id\n print data.show2()",
"def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)",
"def add(self, rec):\n #print(\"ADD REC={}\".format(rec))\n if self._disable_insert:\n return\n if self._is_mem:\n key = '#'.join([rec['ts'], rec['user'], rec['narr']])\n if key in self._rkeys:\n self._add_duplicate()\n self._rkeys.add(key)\n cursor = self._sq.cursor()\n rec['name'] = rec['name'][19:] # strip 'biokbase.narrative.'\n values = [rec[c] for c in self.COLUMNS]\n ivalues = []\n for v in values:\n if isinstance(v, float):\n ivalues.append('{:f}'.format(v))\n else:\n ivalues.append('\"' + v + '\"')\n stmt = self._insert_stmt.format(values=','.join(ivalues))\n # add record\n try:\n cursor.execute(stmt)\n except sqlite3.IntegrityError:\n self._add_duplicate()\n cursor.close()",
"def test_single(self):\n s = djset()\n s.add([1, 2, 3])\n self.assertEquals({1, 2, 3}, s.data[1])",
"def add_record(self, data):\n if self.current_trip is None:\n print \"no trip to add data\"\n return\n self.current_trip.store_data(data)",
"def add_sets(self, key, member):\n return self.redis.sadd(key, member)",
"def save_data(self, record):\n self.dbm.addRecord(record)",
"def add_set(self, repres):\n s = self.set_indx(repres)\n if not s is None:\n raise Exception\n self._data.append(set(repres))",
"def add_record(self, record: Optional[Record] = None, **kwargs):\n\n if record is None:\n record = Record(**kwargs)\n else:\n record.update(**kwargs)\n\n return self.db.insert_record(record=record)",
"def setRecord(self,record):\n idLower = record.getId().lower()\n type = record.name\n typeIds = self.indexed[type]\n if idLower in typeIds:\n oldRecord = typeIds[idLower]\n index = self.records.index(oldRecord)\n self.records[index] = record\n else:\n self.records.append(record)\n typeIds[idLower] = record",
"def append(self, obj):\r\n self.record_count += 1\r\n \r\n if type(obj) == dict:\r\n self._probe_record(obj)\r\n else:\r\n self._probe_row(obj)",
"def insert(self, val):\n new_item = False\n if val not in self.ds:\n self.ds.add(val)\n self.keys.append(val)\n new_item = True\n return new_item",
"def add(self, other):\n self._check_item(other)\n self._set.add(other)",
"def put_record(self, record):\r\n row = [record.get(field) for field in self.fields.names()]\r\n\r\n self.put(row)",
"def add(self, elem):\n self.data.append(elem)\n self._prune()",
"def add_record(self, msg_id, rec):\n if msg_id in self._records:\n raise KeyError(\"Already have msg_id %r\" % (msg_id))\n self._check_dates(rec)\n self._records[msg_id] = rec\n self._add_bytes(rec)\n self._maybe_cull()",
"def add(self, x):\n if x not in self:\n self._seen.add(x)\n self._list.append(x)\n return True\n return False"
]
| [
"0.6866645",
"0.6579463",
"0.63470685",
"0.62643814",
"0.6259924",
"0.6255585",
"0.6249951",
"0.6244476",
"0.6234149",
"0.61436373",
"0.61233157",
"0.61089814",
"0.6103372",
"0.6085331",
"0.60585153",
"0.6054745",
"0.6037263",
"0.59963894",
"0.59255654",
"0.5917086",
"0.5911691",
"0.59105694",
"0.59073013",
"0.58650845",
"0.58591205",
"0.58415896",
"0.5833849",
"0.5808914",
"0.58089066",
"0.5787575"
]
| 0.67170227 | 1 |
Back up output to local path. | def backup_output_path(self):
backup_path = TaskOps().backup_base_path
if backup_path is None:
return
FileOps.copy_folder(TaskOps().local_output_path, backup_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backup_command(server, output):\n # Stop saving chunks\n server.save_off()\n # Run the external save program\n subprocess.call(CONFIG['backup_command']['script'].split())\n # Start saving chunks again\n server.save_on()\n return",
"def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)",
"def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)",
"def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')",
"def backup_file(cloud, input_file, output_file):\n if cloud.find_one(path=output_file):\n return False\n\n print \"Backing up file:\", input_file, \"->\", output_file\n cloud.store_from_filename(input_file, output_file)\n\n return True",
"def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')",
"def backup_file(file_path: Path):\n src = str(file_path)\n dst = f\"{file_path}.backup.{datetime.now().isoformat()}\"\n\n print(f\"Backing Up: {src} -> {dst}\")\n copy(src, dst)",
"def backup():\n local_filename = get_backup_filename(hostname=socket.gethostname())\n local(BACKUP_COMMAND + local_filename)\n\n return local_filename",
"def backup(self, outdir=None):\n import os\n if outdir is None:\n import time\n outdir = os.path.join('backup',time.strftime('%Y%m%d-%H%M'))\n cmd = 'time mongodump -c \"%s\" -h %s:%s -d mfdb -o \"%s\"'%(\n self.collection.name, self.db.host, self.db.port, outdir)\n print cmd\n os.system(cmd)",
"def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)",
"def backup_remote():\n remote_filename = get_backup_filename(hostname=env.host_string)\n print(\"Remote filename: \" + remote_filename)\n\n with cd('bookmarker'):\n run('source env/bin/activate && ' + BACKUP_COMMAND + remote_filename)\n # scp the remote backup file to local.\n get(remote_filename, remote_filename)\n\n return remote_filename",
"def dir_backup():\n return abspath('back')",
"def run_backup():\n host = re.search(\"([\\w.-]+)[:]?\", env.host).group()\n date = time.strftime('%Y%m%d%H%M%S')\n fname = '%(host)s-backup-%(date)s.gz' % {'date': date, 'host': host}\n green(\"Ingrese la contraseña de la clave privada local.\")\n sudo(\"pg_dump kine | gzip > /tmp/%s\" % fname, user=\"postgres\")\n get(\"/tmp/%s\" % fname, os.path.join(backup_dir, fname))\n sudo(\"rm /tmp/%s\" % fname, user=\"postgres\")",
"def bless_output(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if path.exists(expected_output_file):\n os.unlink(expected_output_file)\n os.rename(actual_output_file, expected_output_file)",
"def backup(self):\n\n\t\twith temp_dir(self.path):\n\t\t\t# only if changes made\n\t\t\tcheck = sp.check_output(['git', 'status', '--porcelain'])\n\t\t\t# check if untracked files\n\t\t\tuntracked = sp.check_output(['git', 'ls-files', '--others', '--exclude-standard'])\n\n\t\t\tif check:\n\t\t\t\tif untracked:\n\t\t\t\t\t# just add them all ... probably a better/safer/more direct way to do this\n\t\t\t\t\t_ = sp.check_output(['git', 'add', '.'])\n\t\t\t\t_ = sp.check_output([\n\t\t\t\t\t\t\"git\", \"commit\", \"-am\", f\"AUTO update on {dt.date.today().isoformat()}\"])\n\n\t\t\t# presumes that there is a remote!\n\t\t\toutput = sp.check_output([\n\t\t\t\t\t\"git\", \"push\"],\n\t\t\t\t\tstderr=sp.STDOUT\n\t\t\t\t\t)\n\n\t\t\treturn output.decode()\n\t\t\t# else:\n\t\t\t# \treturn 'No changes to commit'",
"def _backup_meta_data(meta_path: Path) -> None:\n meta_path = meta_path.resolve()\n backup_meta_path = meta_path.parent / (meta_path.name + \".bak\")\n i = 0\n while backup_meta_path.exists():\n backup_meta_path = backup_meta_path.with_suffix(\".bak{}\".format(i))\n i += 1\n shutil.copy(str(meta_path), str(backup_meta_path))",
"def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")",
"def _backupLog(self, updateText):\n \taFile = \"archiving_log.txt\"\n \tos.rename( aFile, aFile+\"~\")\n \tdestination= open( aFile, \"w\" )\n \tsource= open( aFile+\"~\", \"r\" )\n \tfor line in source:\n \t\tdestination.write( line )\n \tdestination.write( str(updateText))\n \tsource.close()\n \tdestination.close()\n \tos.remove(aFile+\"~\")",
"def export(self, location):\n temp_dir = tempfile.mkdtemp('-export', 'pip-')\n self.unpack(temp_dir)\n try:\n call_subprocess(\n [self.cmd, 'archive', location],\n filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)\n finally:\n rmtree(temp_dir)",
"def backup(self):\n if self.url is not None:\n\n # zip backup folder\n zipapp.create_archive(self.logs_directory, self.send_zip)\n\n # then send zipped folder to the URL\n try:\n requests.post(self.url, files={\n 'uploaded_file': (os.path.basename(self.send_zip), open(self.send_zip, 'rb')),\n })\n except requests.exceptions.ConnectionError as error:\n print(error)",
"def save(self, url, output):\n\n shutil.copy2(self.get(url), output)",
"def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst",
"def __makeBackup(self):\n pass #FIXME!!!",
"def save_copy_log(self):\n if self.packaging_wrapper and self.finish_log:\n with open(\n '{}/copy_log.log'.format(self.packaging_wrapper.dest_root),\n 'w') as log_file:\n self.finish_log[0] = self.finish_log[0].replace('\\n\\n', '')\n log_file.writelines(self.finish_log)",
"def fRenderTargetBackupTab():\n node = nuke.thisNode()\n # create tab an button\n tab = nuke.Tab_Knob(\"fRenderTargetBackup_tab\",\"Backup Renders\")\n button = nuke.PyScript_Knob('backup')\n button.setCommand('import dmptools.utils.nukeCommands as nc;nc.fRenderTargetBackup()')\n button.setName('backup renders')\n button.setLabel('backup!')\n button.setTooltip('backup renders to a directory in /tmp/fRenderTarget/<current time>')\n \n # create checkbox\n checkBox = nuke.Boolean_Knob(\"userCustomPath\",\"Use custom path\")\n checkBox.setValue(False)\n # add output textfield\n output = nuke.File_Knob('output', 'backup path')\n output.setValue('/tmp/fRenderTarget/')\n\n # add knobs to the node\n node.addKnob(tab)\n node.addKnob(button)\n node.addKnob(checkBox)\n node.addKnob(output)",
"def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")",
"def backup(self):\n\n if not File.backup_text(self.get_title()): return\n if Settings.get_destination() == \"remote\":\n Remote.upload_file(self)\n elif Settings.get_destination() == \"google\":\n Google.upload_file(file=self)\n else:\n # move file to local backup location\n backupPath = os.path.join(Settings.get_local_path(), \"posted\")\n backupPath = os.path.join(backupPath, self.category, self.get_title())\n shutil.move(self.get_path(), backupPath)",
"def tmp_backup(path):\n if not _os.path.isfile(path):\n raise FileNotExistError(path)\n tmpdir = _tempfile.mkdtemp()\n _shutil.copy2(path, tmpdir)\n return _os.path.join(tmpdir, _os.path.basename(path))",
"def backup(self):\n\n for filename in self.filenames[:]:\n if not filename.endswith(\".\"+self.PYTHON_EXTENSION):\n continue\n origfilename = filename + \".\" + self.BACKUP_EXTENSION\n if origfilename not in self.filenames:\n shutil.copy(filename, origfilename)\n self.filenames.append(origfilename)",
"def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)"
]
| [
"0.6644395",
"0.6394173",
"0.5991687",
"0.59489906",
"0.59429973",
"0.5856808",
"0.5855813",
"0.5798508",
"0.57897097",
"0.564352",
"0.5574243",
"0.5509337",
"0.54634744",
"0.5453597",
"0.5429871",
"0.537353",
"0.5356973",
"0.5346617",
"0.5346035",
"0.53141814",
"0.5309788",
"0.53050995",
"0.53037256",
"0.5292492",
"0.5273521",
"0.5272081",
"0.5269895",
"0.5249618",
"0.5224934",
"0.52203506"
]
| 0.68394625 | 0 |
Output step all records. | def output_step_all_records(self, step_name, desc=True, weights_file=True, performance=True):
records = self.all_records
logging.debug("All records in report, records={}".format(self.all_records))
records = list(filter(lambda x: x.step_name == step_name, records))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump records, report is emplty.")
return
self._output_records(step_name, records, desc, weights_file, performance)
logging.info(self.print_best(step_name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n print('#' + '\\t'.join(OutputRecord.get_header_fields()),\n file=self.args.output_tsv)\n for chunk in chunk_by_query(self.sam_file, expand_xa=True):\n #print('STARTING CHUNK', file=sys.stderr)\n if not chunk:\n continue # ignore empty chunks\n self.handle_chunk(chunk)",
"def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())",
"def write(self, output_stream=sys.stdout):\n for model in self.models:\n if len(model.chains) == 0:\n continue\n if len(self.models) > 1:\n print(\"MODEL %4d\" % (model.number), file=output_stream)\n model.write(output_stream)\n if len(self.models) > 1:\n print(\"ENDMDL\", file=output_stream)\n print(\"END\", file=output_stream)",
"def write_dag(self, out=sys.stdout):\n for rec in sorted(self.values()):\n print(rec, file=out)",
"def write_output(self):\n self.tcex.log.info('Writing Output')\n self.tcex.log.info(type(self.all_items))\n self.tcex.log.info(len(self.all_items))\n self.tcex.playbook.create_output('firework_alert.json', self.all_items)",
"def report(self):\n for c in self._call_chain:\n print c.title\n print '=' * len(c.title)\n c.report()\n print",
"def pp_all_batches(self):\n for batch in self.batch_headers:\n self.pp_batch(batch)",
"def output_data(self):\n if not self.is_record:\n logging.error('Env: no record to output!')\n else:\n control_data = pd.DataFrame(self.control_data)\n control_data.to_csv(self.output_path + ('%s_%s_control.csv' % (self.name, self.agent)))",
"def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()",
"def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))",
"def write_all(self):\r\n pass",
"def print_results(self):\n pass",
"def sequential_print_statements():\n pass",
"def run(self) -> Any:\n self.prepare()\n for step in self.stream:\n self.output = step\n return self.output",
"def OutputTestRecord(self, test_record):\n for output_cb in self.output_callbacks:\n output_cb(test_record)",
"def result(self, step):\n indent_extra = 0\n if self.current_rule:\n indent_extra = self.indent_size\n\n step = self.steps.pop(0)\n indent = make_indentation(2 * self.indent_size + indent_extra)\n if self.show_aligned_keywords:\n # -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6):\n text = u\"%s%6s %s ... \" % (indent, step.keyword, step.name)\n else:\n text = u\"%s%s %s ... \" % (indent, step.keyword, step.name)\n self.stream.write(text)\n\n status_text = step.status.name\n if self.show_timings:\n status_text += \" in %0.3fs\" % step.duration\n\n unicode_errors = 0\n if step.error_message:\n try:\n self.stream.write(u\"%s\\n%s\\n\" % (status_text, step.error_message))\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s\\n\" % status_text)\n self.stream.write(u\"%s while writing error message: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n else:\n self.stream.write(u\"%s\\n\" % status_text)\n\n if self.show_multiline:\n if step.text:\n try:\n self.doc_string(step.text)\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s while writing docstring: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n if step.table:\n self.table(step.table)",
"def log_all(self):\n self.save_raw()\n self.log()",
"def printCsv(self):\n self.printCsvHeader()\n for r in self._records:\n r.printCsv()",
"def print(self):\n self.__print_local(self.dataset, 0)",
"def printOutput(self):\n pass",
"def handle_output(all_rows, opts):\n for dev, rows in all_rows.iteritems():\n\n if opts.csv:\n writer = csv.writer(sys.stdout)\n for row in rows:\n writer.writerow([dev] + row)\n elif opts.dotty:\n continue\n elif opts.sqldb:\n write_sqldb(opts.sqldb, dev, rows)\n else:\n print 'DEVICE: {}'.format(dev)\n print_table(rows)",
"def generate_results():\n for idx, examples in enumerate(generate_groups()):\n # converting iterators to list so resources\n # are not shared in concurrent workers\n yield write_tfrecord(\n examples=examples,\n encode_fn=encode_fn,\n file_name=tfrecord_name.format(idx))",
"def print_out():\n pass",
"def save_all_summaries(writer: SummaryWriter, step: int) -> None:\n for summary in get_all_summaries().values():\n summary.save(writer, step)",
"def record(self, step):",
"def write_output(self):",
"def print_the_contents_of_all_entries(self):\n\n if len(self.student_list):\n self.print_dataframe(self.student_list)\n else:\n print('There is no contents to show')",
"def run_verbose(self, steps = 10):\n for step in range(steps):\n if self.is_done():\n print 'Done, stopping.'\n print self.to_string()\n return\n print self.to_string()\n self.step()",
"def dumpo(self):\n return self.do_all()",
"def printme(self):\n sys.stdout.write(self._header)\n for k in range(len(self)):\n sys.stdout.write(self.line(k))"
]
| [
"0.6829503",
"0.63628876",
"0.62389874",
"0.60432297",
"0.60114896",
"0.5987003",
"0.59672743",
"0.5898101",
"0.5888029",
"0.5881636",
"0.5874173",
"0.58740133",
"0.5855136",
"0.58379674",
"0.5837826",
"0.5834566",
"0.58343154",
"0.57998765",
"0.5793351",
"0.5792536",
"0.5772966",
"0.57628155",
"0.5741186",
"0.57385194",
"0.573348",
"0.57237124",
"0.5672816",
"0.56694824",
"0.5664958",
"0.56397915"
]
| 0.72609735 | 0 |
Dump report to file. | def dump(self):
try:
_file = FileOps.join_path(TaskOps().step_path, "reports.csv")
FileOps.make_base_dir(_file)
data = self.all_records
data_dict = {}
for step in data:
step_data = step.serialize().items()
for k, v in step_data:
if k in data_dict:
data_dict[k].append(v)
else:
data_dict[k] = [v]
data = pd.DataFrame(data_dict)
data.to_csv(_file, index=False)
_file = os.path.join(TaskOps().step_path, ".reports")
_dump_data = [ReportServer._hist_records, ReportServer.__instances__]
with open(_file, "wb") as f:
pickle.dump(_dump_data, f, protocol=pickle.HIGHEST_PROTOCOL)
self.backup_output_path()
except Exception:
logging.warning(traceback.format_exc()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def report(self, output_dir):",
"def export_records_to_file(self, report_file):\n self.write_to_file(report_file, self.construct_report_columns())\n for record in self.database.fetch_sensor_data_grouped_by_date():\n date_report = self.construct_date_report(record)\n print(\"{},{}\".format(*date_report))\n self.database.add_report_record(*date_report)\n self.write_to_file(report_file, \"{},{}\".format(*date_report))",
"def save_report(dataframe, filename, index=True):\n out_file = reports_path / filename\n dataframe.to_csv(out_file, index=index)\n logging.info(f\"Written report to {out_file.resolve()}\")",
"def run(self):\n report_file = self.get_report_file_name()\n self.export_records_to_file(report_file)\n print(\"Report file ({}) generated.\".format(report_file))",
"def report(db, openfile):\n pass",
"def report(self, **options):\n pass",
"def dump(self, filename=None):\n if filename is None:\n current_datetime = datetime.datetime.now()\n filename = current_datetime.strftime(\"results_%Y%m%d%H%M%S.csv\")\n print(\"Writing results to \\\"{}\\\"\".format(filename))\n identifiers = \"\"\n values = \"\"\n for entry in self.log.items():\n identifiers += ((\";\" if len(identifiers) > 0 else \"\")\n + str(entry[0]))\n values += \";\" + str(entry[1]) if len(values) > 0 else str(entry[1])\n with open(filename, 'a') as f:\n f.write(identifiers + \"\\n\")\n f.write(values + \"\\n\")",
"def writeDebugReport(self, name, pyfile=None, plugin=None):\n\n dump_name = \"debug_%s_%s.zip\" % (name, strftime(\"%d-%m-%Y_%H-%M-%S\"))\n if pyfile:\n dump = self.getFileDump(pyfile)\n else:\n dump = self.getPluginDump(plugin)\n\n try:\n import zipfile\n\n zip = zipfile.ZipFile(dump_name, \"w\")\n\n if exists(join(\"tmp\", name)):\n for f in listdir(join(\"tmp\", name)):\n try:\n # avoid encoding errors\n zip.write(join(\"tmp\", name, f), save_join(name, f))\n except:\n pass\n\n info = zipfile.ZipInfo(save_join(name, \"debug_Report.txt\"), gmtime())\n info.external_attr = 0644 << 16L # change permissions\n zip.writestr(info, dump)\n\n info = zipfile.ZipInfo(save_join(name, \"system_Report.txt\"), gmtime())\n info.external_attr = 0644 << 16L\n zip.writestr(info, self.getSystemDump())\n\n zip.close()\n\n if not stat(dump_name).st_size:\n raise Exception(\"Empty Zipfile\")\n\n except Exception, e:\n self.log.debug(\"Error creating zip file: %s\" % e)\n\n dump_name = dump_name.replace(\".zip\", \".txt\")\n f = open(dump_name, \"wb\")\n f.write(dump)\n f.close()\n\n self.log.info(\"Debug Report written to %s\" % dump_name)\n return dump_name",
"def report():\n pass",
"def report(self, filename):\n\n writer = csv.writer(open(filename, 'wt'))\n\n for obj, msg in self.errors:\n writer.writerow([msg] + obj.original_record)",
"def _write_report_to_file(self,\n report_entities: str,\n report_tokens: str,\n epoch: int,\n tr_loss: float,\n val_loss: float):\n with open(os.path.join(self.output_dir, 'classification_report.txt'), 'w') as f:\n f.write(report_entities)\n f.write(f'\\n{report_tokens}')\n f.write(f\"\\nEpoch: {epoch} \"\n f\"\\n- Training Loss: {tr_loss}\"\n f\"\\n- Validation Loss: {val_loss}\")",
"def report(self, report_options=None):\n raise NotImplementedError()",
"def save_markdown_report(self, **kwargs):\n save_dir = os.path.dirname(self.file_paths[0])\n timestamp = datetime.datetime.utcnow().strftime(\"%Y-%j-%Hh%Mm%Ss\")\n markdown_file_name = \"report_{}.md\".format(timestamp)\n markdown_file_path = os.path.join(save_dir, markdown_file_name)\n report_str = self.generate_report()\n with open(markdown_file_path, \"w\") as md_file:\n md_file.write(report_str)",
"def dump(self, filename):\n # WorkflowTestCase.dump(filename, self._workflow_test_suite_configuration)\n raise Exception(\"Not implemented yet!\")",
"def open(self):\r\n safe_mkdir(os.path.dirname(self._html_dir))\r\n self._report_file = open(self.report_path(), 'w')",
"def saveReport(self,saveDir:str)->None:\n reportFile = open(saveDir + '/history.txt', 'w')\n reportFile.write(self.report.get('0.0', END))\n reportFile.close()",
"def dump(self, filename):\n suffix = filename.split(\".\")[-1]\n if not suffix == \"dflx\":\n filename = filename + \".dflx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n f = open(filename, \"wb\")\n pickle.dump(self.meta, f)\n pickle.dump(self.__dict__, f)\n f.close()\n logging.info(\"Results dumped to %s.\", filename)",
"def dump(log, file):\n file.write('FSH|%s|PyDL7|ZXU|%s|\\n' %\n (log.metadata.get('encoding_characters', '^~<>{}'),\n log.created.strftime('%Y%m%d%H%M%S')))\n file.write('ZRH|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (log.metadata.get('encoding_characters', '^~<>{}'),\n log.computer_model,\n log.computer_serial,\n log.depth_pressure_unit,\n log.altitude_unit,\n log.temperature_unit,\n log.tank_pressure_unit,\n log.tank_volume_unit))\n for dive in log.dives:\n file.write('ZDH|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (dive.metadata.get('export_sequence', dive.sequence_number),\n dive.sequence_number,\n dive.metadata.get('record_type', 'M'),\n dive.recording_interval,\n dive.leave_surface_time.strftime('%Y%m%d%H%M%S'),\n dive.air_temperature,\n dive.tank_volume,\n dive.O2_mode,\n dive.rebreather_diluent_gas,\n dive.altitude))\n if dive.record:\n file.write('ZDP{\\n')\n for detail in dive.record:\n file.write('|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (detail.elapsed_time,\n detail.depth,\n detail.gas_switch,\n detail.current_PO2,\n str(detail.ascent_rate_violation)[0],\n str(detail.decompression_violation)[0],\n detail.current_ceiling,\n detail.water_temperature,\n detail.warning_number,\n detail.main_cylinder_pressure,\n detail.diluent_cylinder_pressure,\n detail.oxygen_flow_rate,\n detail.CNS_toxicity,\n detail.OUT,\n detail.ascent_rate))\n file.write('ZDP}\\n')\n file.write('ZDT|%s|%s|%s|%s|%s|%s|\\n' %\n (dive.metadata.get('export_sequence', dive.sequence_number),\n dive.sequence_number,\n dive.max_depth,\n dive.reach_surface_time.strftime('%Y%m%d%H%M%S'),\n dive.min_water_temperature,\n dive.pressure_drop))",
"def save(self, report_file):\n\n with open(report_file, \"w\", encoding=\"utf-8\") as report:\n csv_writer = csv.writer(report, delimiter=\",\", lineterminator=\"\\n\", quoting=csv.QUOTE_ALL)\n csv_writer.writerow([self._name, \"Lines Of Code\"])\n for region in self._regions:\n csv_writer.writerow([region.label(), region.loc()])",
"def write_report(self):\n\n def report_array(f, label, array):\n f.write(label)\n for val in array:\n f.write('{:.4f},\\t'.format(val))\n f.write('\\n')\n\n report_file = FLAGS.report_file\n\n with open(report_file, 'w') as f:\n f.write('Mean Error 2D: {}\\n'.format(\n safe_divide(self._error_2d, self._matched)))\n f.write('Mean 3D IoU: {}\\n'.format(\n safe_divide(self._iou_3d, self._matched)))\n f.write('Mean Azimuth Error: {}\\n'.format(\n safe_divide(self._azimuth_error, self._matched)))\n f.write('Mean Polar Error: {}\\n'.format(\n safe_divide(self._polar_error, self._matched)))\n\n f.write('\\n')\n f.write('IoU Thresholds: ')\n for threshold in self._iou_thresholds:\n f.write('{:.4f},\\t'.format(threshold))\n f.write('\\n')\n report_array(f, 'AP @3D IoU : ', self._iou_ap.aps)\n\n f.write('\\n')\n f.write('2D Thresholds : ')\n for threshold in self._pixel_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @2D Pixel : ', self._pixel_ap.aps)\n f.write('\\n')\n\n f.write('Azimuth Thresh: ')\n for threshold in self._azimuth_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @Azimuth : ', self._azimuth_ap.aps)\n f.write('\\n')\n\n f.write('Polar Thresh : ')\n for threshold in self._polar_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @Polar : ', self._polar_ap.aps)",
"def export_to_report_file(self, obj, export_format='csv', **kwargs):\n report_file = kwargs.get('report_file', None)\n\n if not report_file:\n report_file = \"{}_{}.{}\".format(\n type(obj).__name__, pytan.utils.get_now(), export_format,\n )\n m = \"No report file name supplied, generated name: {!r}\".format\n self.mylog.debug(m(report_file))\n\n clean_keys = ['obj', 'export_format', 'contents', 'report_file']\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)\n\n # get the results of exporting the object\n contents = self.export_obj(obj=obj, export_format=export_format, **clean_kwargs)\n report_path = self.create_report_file(\n report_file=report_file, contents=contents, **clean_kwargs\n )\n return report_path, contents",
"def _toFile(self):\n pass",
"def export_to_file(self):\r\n return True",
"def copy_report(cls, req):\n try:\n if req.report:\n report_url = cls.dvs_api_v1 + '/download/' + req.report\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(req.tracking_id))\n response = requests.post(url=report_url)\n file = open(os.path.join(upload_path, req.report), \"w+\")\n file.write(response.text)\n file.close()\n except Exception as e:\n app.logger.exception(e)\n raise e",
"def dump(self, filename, mode='w', rebox=False):\n from os import path\n filepath = path.abspath(path.expanduser(filename))\n if mode == 'w':\n open(filepath, 'w').close() \n for t, ts in self:\n ts.dump(filename, rebox=rebox)",
"def writeReport(sourceFile, sub, scan, ic, stats, fout, delim='\\t'):\n data = [ic, sourceFile, sub, scan] + stats.split()\n fout.write(delim.join(data) + '\\n')",
"def cleaning_file():\n f = open (\"report_for_judy_part2.txt\", \"w\")\n f.close()",
"def write_error_report(self):\n\n with open('runReport.txt', 'a') as report:\n report.write(\"Number of Hits: \" + str(self.num_hits) + '\\n')\n report.write(\"Number of Requests: \" + str(self.num_requests) + '\\n')\n report.write(\"Hit Rate: \" + str((self.num_hits / self.num_requests)))\n report.write(\"Datafiles downloaded: \" + str(self.num_datafiles))\n now = datetime.now()\n dt_string = now.strftime(\"%H:%M %m/%d/%Y\")\n report.write(\"Run finished \" + dt_string)",
"def save(self, filename: str):\n dump(self, filename)",
"def dump_to_log(self):\n # self._send_request(\"/dumpToLog\")\n pass"
]
| [
"0.68204385",
"0.6599571",
"0.6362758",
"0.6291036",
"0.6244349",
"0.6243112",
"0.6230732",
"0.6227176",
"0.61734873",
"0.6153664",
"0.6133584",
"0.610474",
"0.60003084",
"0.5990166",
"0.5954442",
"0.5949858",
"0.59321535",
"0.5924271",
"0.59219396",
"0.59174067",
"0.5915938",
"0.5909081",
"0.59002566",
"0.5883732",
"0.58748376",
"0.58591175",
"0.5857789",
"0.58511055",
"0.5842844",
"0.5835504"
]
| 0.70031875 | 0 |