query            stringlengths    9 – 9.05k
document         stringlengths    10 – 222k
metadata         dict
negatives        sequencelengths  30 – 30
negative_scores  sequencelengths  30 – 30
document_score   stringlengths    4 – 10
document_rank    stringclasses    2 values
Converts an atom into a plush gene.
def atom_to_plush_gene(self, atom):
    is_literal = False
    proc_atom = None
    if callable(atom):
        # If it is callable, then it is likely a function that will
        # produce a literal.
        fn_element = atom()
        if callable(fn_element):
            # It's another function!
            proc_atom = fn_element()
        else:
            proc_atom = fn_element
        is_literal = True
    else:
        # If atom is not callable, then it is the instruction/literal.
        proc_atom = atom
        is_literal = not isinstance(proc_atom, Instruction)
    return Gene(proc_atom, is_literal, self.generate_close_count())
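A minimal sketch of the unwrap-and-classify logic in isolation. The ``Instruction`` stand-in below is hypothetical; the real ``Gene`` and ``Instruction`` types come from the surrounding library and are not reproduced here.

# Hypothetical stand-in, only to exercise the resolution logic above.
class Instruction:
    pass

def resolve_atom(atom):
    # Call up to twice to unwrap generator functions, then flag anything
    # that is not an Instruction as a literal.
    if callable(atom):
        element = atom()
        result = element() if callable(element) else element
        return result, True
    return atom, not isinstance(atom, Instruction)

print(resolve_atom(5))          # (5, True): a plain literal
print(resolve_atom(lambda: 7))  # (7, True): a function producing a literal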
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_atom(self, atom):\n\t\treturn atom", "def from_symbol_to_entrez_gene_id(row):\r\n\tgene_entry = annotation_client.get_entrez_gene_id_from_symbol(row['symb'])\r\n\t# import pdb; pdb.set_trace()\r\n\tegid = str(gene_entry['entrez_gene_id'][0]) if gene_entry is not None else \"0\"\r\n\treturn egid", "def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)", "def atom_to_id(atom):\n features = get_feature_list(atom)\n return features_to_id(features, intervals)", "def _gene_ann(gene_ann_path):\n gene_ann = pd.read_csv(gene_ann_path)\n protein_gene = gene_ann[gene_ann.gene_type ==\n 'protein_coding'].gene_name.tolist()\n return(protein_gene)", "def gene(self, idx, value):\r\n self.genes[idx] = value", "def dump_probeset2gene(db):\n\n f = \"_\".join((\"probeset2gene\",db[\"database\"], db[\"host\"], db[\"port\"],\".txt\"))\n if not os.path.exists(f):\n cmd = \"\"\"mysql -h %s -P%s -u ensadmin -pensembl \\\n -e \"select dbprimary_acc, stable_id from xref x, object_xref ox, transcript t, gene_stable_id gsi \\\n where %s and x.xref_id=ox.xref_id and t.transcript_id=ensembl_id \\\n and ox.ensembl_object_type='Transcript' \\\n and gsi.gene_id=t.gene_id group by stable_id, dbprimary_acc \" %s > %s\"\"\" % (db[\"host\"],\n db[\"port\"],\n\tAFFY_XREF_FILTER_CLAUSE,\n db[\"database\"],\n f)\n\n exec_command(cmd)\n return f", "def set_atom(self, locant, atom):\n atom.set_id(locant)\n if locant >= self._next_locant:\n self._next_locant = locant + 1\n self._atom_index[locant] = atom\n self._graph.add_vertex(atom)", "def get_equivalent_atom(self, atom):\n try:\n return self.model_dict[atom.model_id].chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def print_atom(atom):\n\n return atom[\"id\"]", "def testEnsemblToGeneFile(self):\n\n e2g = EnsemblToGeneFile(self.enstogenefile)\n\n self.assertTrue(e2g)\n\n self.assertTrue(len(e2g.geneids) == 38803)\n self.assertTrue(len(e2g.tranids) == 94647)", "def to_chromosome(chromosome):\n\n if isinstance(chromosome, make_chromosome):\n return chromosome\n else:\n return make_chromosome(chromosome)", "def replace_atom(self, locant, atom):\n self._graph.replace_node(self.get_atom_by_locant(locant), atom)\n self._atom_index[locant] = atom", "def get_equivalent_atom(self, atom):\n try:\n return self.chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def link_protein(self, protein):\n if self.protein is None:\n self.protein = protein\n protein.link_gene(self)", "def grch38_braf_genom_silent_mutation():\n params = {\n \"id\": \"normalize.variation:NC_000007.13%3Ag.140453136%3D\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.aMwnr5rEbtPQe5gXDDO2gZO_zSqN2RmH\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.aMwnr5rEbtPQe5gXDDO2gZO_zSqN2RmH\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.zga82-TpYiNmBESCfvDvAz9DyvJF98I-\",\n \"interval\": {\n \"end\": {\"value\": 140753336, \"type\": \"Number\"},\n \"start\": {\"value\": 140753335, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"A\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0002073\",\n \"vrs_ref_allele_seq\": \"A\"\n }\n return VariationDescriptor(**params)", "def 
get_equivalent_atom(self, atom):\n try:\n return self.fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def gene(self, idx):\r\n return self.genes[idx]", "def gene_to_protein(gene: str, intrones: Union[str, Collection[str]]) -> str:\n intrones = intrones if not isinstance(intrones, str) else (intrones,)\n for introne in intrones:\n gene = gene.replace(introne, \"\")\n return dna_to_protein(gene)", "def get_gene(gene):\n\n return copy.deepcopy(gene)", "def map_to_mgi(adata, copy = False):\n from pybiomart import Server\n # connest to the biomart server\n server = Server(host='http://www.ensembl.org')\n\n # retrieve the mouse data set we need\n dataset = (server.marts['ENSEMBL_MART_ENSEMBL']\n .datasets['mmusculus_gene_ensembl'])\n\n # recieve the mapping from ensembl to MGI\n conv_table = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'])\n\n # we first drop duplicates in the first column\n conv_table = conv_table.drop_duplicates(conv_table.columns.values[0])\n\n # convert the gene names from the adata object to a data frame\n adata_table = pd.DataFrame(adata.var_names)\n\n # give the first column a name\n adata_table.columns = ['Gene stable ID']\n\n # change the gene table so that the ensembl names are now the index\n conv_table = conv_table.set_index('Gene stable ID')\n\n # project the names from the conversion table on the corr. names in the\n # adata var names table\n mapping = adata_table.join(conv_table, on='Gene stable ID')\n\n # how many could we not map\n not_found_mgi = sum(pd.isnull(mapping).iloc[:,1])\n\n # how many ensg symbols did we map several times?\n rep_ensg = len(mapping.iloc[:, 0]) - len(set(mapping.iloc[:, 0]))\n\n # how many mgi symbols did we map several times?\n rep_mgi = len(mapping.iloc[:, 1]) - len(set(mapping.iloc[:, 1]))\n\n # print this information\n print('Genes where no MGI annotations where found: {}\\nENSG repetition: {}\\nMGI repetition: {}'.\\\n format(not_found_mgi, rep_ensg, rep_mgi))\n\n # fill nans in mgi column with corresponding ensembl annotations\n mapping['Gene name'].fillna(mapping['Gene stable ID'], inplace = True)\n\n # add the new gene names to the adata object\n adata.var['mgi_symbols'] = mapping['Gene name'].tolist()", "def mutate(individual, mutation_rate):\n new_chromo = Chromosome(sequence=individual.sequence)\n if random.random() < mutation_rate:\n position = random.randrange(len(individual.sequence))\n mutation = format(random.randrange(9), 'x')\n sequence_list = list(individual.sequence)\n sequence_list[position] = mutation\n new_sequence_string = ''.join(sequence_list)\n new_chromo.sequence = new_sequence_string\n return new_chromo", "def get_equivalent_atom(self, atom):\n try:\n return self.atom_dict[atom.name]\n except KeyError:\n return None", "def get_gene(self):\n return self._gene", "def convert_amber_atomtype_to_rosetta_atomtype(self):\n\n tmpfile = open(\"tmp.mol2\", 'w')\n with open(\"ligand_am1_bcc.mol2\",'r') as f:\n atoms = False\n\n for line in f:\n\n print \"ATOM\", line.find(\"@<TRIPOS>ATOM\"),line\n print \"BOND\", line.find(\"@<TRIPOS>BOND\"),line\n\n if ( len(line) > 13 and line.find(\"@<TRIPOS>ATOM\") >-1.0):\n atoms = True\n\n elif ( len(line) > 13 and line.find(\"@<TRIPOS>BOND\") >-1.0):\n atoms = False\n\n elif( atoms == True and len(line) > 75 ):\n tmp_characters = line[47]+\".\"+line[48]\n line = line[0:47]+tmp_characters+line[50:]\n\n tmpfile.write(line)\n tmpfile.close()", "def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n 
return float(token)\n except ValueError:\n return Symbol(token) # Equivalent to str(token)", "def get_output_node_gene(key, config):\n gene1 = OutputNodeGene(key, config)\n gene1.aggregation = 'a'\n gene1.bias = 0\n gene2 = OutputNodeGene(key, config)\n gene2.aggregation = 'b'\n gene2.bias = 1\n return gene1, gene2", "def read_in_GO(pdbfile):\n with open(pdbfile, \"r\") as f:\n filedata = f.read()\n filedata = filedata.replace(\"C GRA X\", \"CX GGG \")\n content = filedata.splitlines()\n atom_lines = [x.split() for x in content if (('ATOM' in str(x)) and (('C1A' in str(x)) or ('E1A' in str(x)) or ('H1A' in str(x)) or ('GGG' in str(x))))]\n atoms = [Atom(int(str(atom_lines[x][1])), str(atom_lines[x][2]), str(atom_lines[x][3]), int(str(atom_lines[x][4])), float(str(atom_lines[x][5])), float(str(atom_lines[x][6])), float(str(atom_lines[x][7]))) for x in range(len(atom_lines))] \n return atoms", "def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)", "def fromgenotype(self):\n\t\tpass" ]
[ "0.6376666", "0.54954624", "0.5164283", "0.5075839", "0.49984407", "0.49653354", "0.4889176", "0.48709634", "0.47833455", "0.4767029", "0.47523892", "0.47412694", "0.47290564", "0.47286844", "0.47149265", "0.47015738", "0.46589604", "0.4652836", "0.46481746", "0.46437928", "0.46432018", "0.46273285", "0.46124208", "0.46080974", "0.4594867", "0.45920187", "0.45697054", "0.45427012", "0.4541786", "0.4539802" ]
0.7106877
0
Returns a random Plush gene given ``atom_generators`` and epigenetic markers.
def random_plush_gene(self):
    atom = random.choice(list(self.atom_generators))
    return self.atom_to_plush_gene(atom)
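A small sketch of the sampling step with a hypothetical atom pool. Note that ``random.choice`` requires an indexable sequence, which is presumably why the atom generators are first wrapped in ``list``.

import random

# Hypothetical atom pool; real generators come from the Push instruction set.
atom_generators = {"integer_add", lambda: random.randint(0, 9)}
atom = random.choice(list(atom_generators))  # set -> list, since choice needs a sequence
print(atom() if callable(atom) else atom)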
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def random_gene(self):\n path_number = 6\n x = random.randint(0, path_number)\n return x", "def createGene(self):\n # Beginning and end of the alphabet for random gene generation\n Astart = 97\n Zend = 122\n return \"\".join(map(lambda i: chr(random.randint(Astart, Zend)), range(random.randint(4, 8)))).upper()", "def generate_random_gene_sequence(gene_pool):\n genes = []\n for j in range(DEFAULT_CHROMOSOME_SIZE):\n genes.append(random.choice(gene_pool))\n\n return genes", "def simulate_generations(gene_pool, environment, gen=DEFAULT_GENERATIONS):\n seq_to_fitness = multiprocessing.Manager().dict()\n chromosomes = []\n fittest_chromosome = []\n\n for i in range(DEFAULT_POPULATION_SIZE):\n chromosomes.append(generate_random_gene_sequence(gene_pool))\n\n for i in range(gen):\n chromosomes, fittest_chromosome = simulate_generation(chromosomes,\n gene_pool,\n environment,\n seq_to_fitness)\n\n if i < gen - 1:\n chromosomes = delete_duplicates(chromosomes, gene_pool)\n\n return fittest_chromosome", "def get_random_genome(self):\n return random.choice(self.genomes)", "def random_plush_genome_with_size(self, genome_size):\n atoms = rand.choice(list(self.atom_generators), size=genome_size)\n return [self.atom_to_plush_gene(atom) for atom in atoms]", "def rnd_genes(genes=[], n=1, gene_data=None):\n if gene_data is None:\n return np.array([])\n gene_tetra, gene_ct, gene_ids, gene_names = gene_data\n # how many genes are there total?\n if genes == []:\n sel_genes = np.ones(gene_ids.shape, dtype=bool)\n else:\n sel_genes = np.zeros(gene_ids.shape, dtype=bool)\n for gene in genes:\n sel_genes = np.logical_or(sel_genes, \\\n gene_ids == gene_names[gene])\n # randomly pick genes from the collection\n rand_picks = np.random.randint(sum(sel_genes), size=(n,))\n tetra = gene_tetra[sel_genes][rand_picks]\n return tetra", "def generate_E_random(number_obeservations, number_environments):\n E = np.random.randint(0, number_environments, (number_obeservations,1))\n return E", "def generate_rng(nrngs, startseed=None):\n start_rng = np.random.RandomState(startseed)\n for i in range(nrngs):\n yield np.random.RandomState(start_rng.randint(2**32))", "def _generator():\n filename_1 = 'gene.txt'\n filename_2 = 'geneSynonym.txt'\n gene_set_1 = gene_names(filename_1)\n gene_syn = gene_names(filename_2, complete=False)\n genes = gene_set_1 | gene_syn\n return genes", "def _make_random_genome(evo_config):\n\n # create random genome by creating chromosomes for box size and movement\n return _make_size_dict(evo_config), _make_move_pattern(_make_limb_dict(), evo_config)", "def random_chromosome(self):\n genes = []\n for i in range(self.chromosome_length):\n genes.append(self.random_gene())\n\n return genes", "def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }", "def generate_random_events_list(generator_spec_list):\n\n data = []\n for spec in generator_spec_list:\n generator = spec[tg.GENERATOR]\n data += tg.generate_data_stream(generator.models, spec[tg.NUM_EVENTS])\n random.shuffle(data)\n return data", "def random_glove_generator(emb_mean, emb_stddev):\n x = np.random.normal(loc=0.0, scale=1.0, size=len(emb_mean))\n x_rand = np.multiply(x, 
emb_stddev) + emb_mean\n return x_rand", "def get_random_generator(random_generator):\n # define random generator function\n if random_generator == 0:\n logger.info(\"Random generator: MersenneTwister\")\n return random_MersenneTwister\n\n elif random_generator == 1:\n logger.info(\"Random generator: Latin Hypercube\")\n return random_LatinHypercube\n\n else:\n raise ValueError(f\"No random generator exists for random_generator={random_generator}.\")", "def get_random_individual(self, generation):\n if len(self.generations) <= generation < 0:\n raise ValueError('Please enter a valid generation.')\n return self.get_individual(\n generation=generation,\n index=random.randint(0, len(self.generations[generation]) - 1))", "def genSeed():\n\tseed_length = int(''.join(random.SystemRandom().choice(string.digits) for _ in range(0, 3)))\n\tseed = os.urandom(seed_length)\n\thashing_algorithm = hashlib.shake_128()\n\thashing_algorithm.update(seed)\n\t# 2200 bytes from SHAKE-128 function is enough data to get 1024 coefficients\n\t# smaller than 5q, from Alkim, Ducas, Pöppelmann, Schwabe section 7:\n\tseed_hash = hashing_algorithm.digest(100)\n\treturn seed, seed_hash", "def generate_random_walker():\n # must have seeds that generate known problems\n must_have_seeds = [112, 308, 393]\n for seed in must_have_seeds:\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections\n while True:\n seed = random.randint(0, 2**10)\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections", "def generator(self, args, gen):\n import random\n\n if args.seed:\n random.seed(args.seed)\n seqs = [s for s in gen]\n sample_indices = random.sample(range(len(seqs)), min(len(seqs), args.number))\n for i in sample_indices:\n yield seqs[i]", "def breed(self, mate=None):\n our_code = self.code\n mate_code = mate.code\n instructs = [\"+\",\"-\",\"[\",\"]\",\"<\",\">\"]\n randint = random.randint(0, len(our_code))\n # Splice them together at random\n result_gene=(our_code[0:randint-1]+mate_code[randint:])\n # Optionally add/remove some info.\n if (random.choice(self.mutation_list)):\n if (random.choice([True, False, False, False,])):\n # Add info\n result_gene = result_gene+ random.choice(instructs)\n else:\n # Remove info\n result_gene = result_gene[:-1]\n try:\n if (random.choice(self.mutation_list)):\n rand = random.randint(0,len(result_gene))\n result_gene = result_gene[:rand-1] + random.choice(instructs) + result_gene[rand:]\n except:\n print \"Error mutating genome\"\n \n # Make a baby organism! 
*squee*\n return Organism(result_gene)", "def create_guess_code(self, pegs):\n\t\tselected_pegs = random.sample(pegs, 4)\n\t\t\n\t\treturn selected_pegs", "def getGeneLetter():\n iRand = random.randint(0, 3)\n if iRand == 0:\n return 'A'\n elif iRand == 1:\n return 'C'\n elif iRand == 2:\n return 'G'\n elif iRand == 3:\n return 'T'\n return '';", "def random_plush_genome(self, max_genome_size):\n genome_size = random.randint(1, max_genome_size)\n return self.random_plush_genome_with_size(genome_size)", "def gen_energies(n_muons):\r\n pdist, bounds = fit_energylaw()\r\n samples = monte_carlo_sample(pdist, bounds, n_muons)\r\n return samples", "def _get_generator(p, task_monitor=None): \n random = StrongRandom()\n candidate = random.randint(1, p - 1)\n if(task_monitor != None): task_monitor.tick()\n \n while(not _is_generator(p, candidate)):\n candidate = random.randint(1, p - 1)\n if(task_monitor != None): task_monitor.tick()\n \n if(params.DEBUG):\n assert pow(candidate, p - 1, p) == 1, \\\n \"generator^{p-1} != 1 mod p (!) see method's \" \\\n \"algorithm explanation.\"\n \n return candidate # this is the generator", "def simulate_generations(self, generations=DEFAULT_GENERATIONS):\n for i in range(generations):\n logging.getLogger().debug(self)\n self.__simulate_generation()\n\n if i < generations - 1:\n self.__delete_duplicates()\n\n return self.fittest_chromosome", "def random_push_code(self, max_points):\n max_genome_size = max(int(max_points / 2), 1)\n genome = self.random_plush_genome(max_genome_size)\n return genome_to_program(genome)" ]
[ "0.62055737", "0.61462736", "0.6118242", "0.59235466", "0.5799568", "0.5753317", "0.57463783", "0.5726904", "0.56850755", "0.5670097", "0.5586955", "0.55046254", "0.5476053", "0.54712176", "0.5389296", "0.5357205", "0.5284238", "0.5267698", "0.52539104", "0.52085936", "0.51701", "0.5159841", "0.5146589", "0.5143352", "0.5138929", "0.5136028", "0.5116095", "0.5107393", "0.50760156", "0.5075209" ]
0.741422
0
Returns a random Plush genome with size ``genome_size``.
def random_plush_genome_with_size(self, genome_size):
    atoms = rand.choice(list(self.atom_generators), size=genome_size)
    return [self.atom_to_plush_gene(atom) for atom in atoms]
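The ``rand.choice(..., size=genome_size)`` call reads like ``numpy.random.choice`` under a ``rand`` alias, which samples with replacement. A minimal sketch under that assumption, with a made-up atom pool:

import numpy.random as rand

pool = ["integer_add", "exec_dup", "boolean_and"]
atoms = rand.choice(pool, size=5)  # five draws, with replacement
print(list(atoms))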
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_plush_genome(self, max_genome_size):\n genome_size = random.randint(1, max_genome_size)\n return self.random_plush_genome_with_size(genome_size)", "def get_random_genome(self):\n return random.choice(self.genomes)", "def generate_random_population(pop_size):\n\n random_population = []\n for agent in range(pop_size):\n random_population.append(generate_random_agent_keys())\n return random_population", "def _make_random_genome(evo_config):\n\n # create random genome by creating chromosomes for box size and movement\n return _make_size_dict(evo_config), _make_move_pattern(_make_limb_dict(), evo_config)", "def _generate_random_population(self, pop_size):\n\n random_population = []\n for agent in range(pop_size):\n random_population.append(self._generate_random_agent())\n return random_population", "def size_rand_sample(size):\n\n assert size > 0\n @sinks\n def _dagpype_internal_fn_act(target):\n i = 0\n sample = None\n try:\n while True:\n e = (yield)\n sample = [e] * size if i == 0 else [e if random.randint(0, i) == 0 else ee for ee in sample]\n i += 1\n except GeneratorExit:\n if sample is not None:\n target.send(sample)\n target.close()\n\n return _dagpype_internal_fn_act", "def generatePopulation(self,size):\n return [ self._individual(self._individualSize) for x in range(size) ]", "def get_random_sequence(genome):\n \n chr_list = get_chromosome_length(genome)\n \n random_seq = {}\n chr = random.sample(chr_list.keys(),1) #select chromosome\n slen = random.randint(300,1000) #select sequence length\n if chr_list[chr[0]] - slen > 0:\n spos = random.randint(1,chr_list[chr[0]] - slen) #select start position\n \n seq = get_fragment(genome, chr[0], slen, spos)\n if seq.count(\"N\") > 0.1 * slen:\n seq = get_random_sequence(genome)\n else:\n seq = get_random_sequence(genome)\n \n return seq", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def initial_population(target_im, population_size):\r\n # Empty population of chromosomes accoridng to the population size specified.\r\n init_population = numpy.empty(shape=(population_size, \r\n functools.reduce(operator.mul, target_im)),\r\n dtype=numpy.uint8)\r\n for indv_num in range(population_size):\r\n # Randomly generating initial population chromosomes genes values.\r\n init_population[indv_num, :] = numpy.random.random(\r\n functools.reduce(operator.mul, target_im))*256\r\n return init_population", "def simplify_once(genome):\n gn = deepcopy(genome)\n n = randint(1, 4)\n action = choice(['silent', 'noop'])\n if action == 'silent':\n silent_n_random_genes(gn, n)\n else:\n noop_n_random_genes(gn, n)\n return gn", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def generate_individual(size):\n individual = []\n \n for i in range(size):\n individual.append(random.randint(0,1))\n\n return individual", "def initialPop(popSize,rangeMin,rangeMax,genLength):\n\t\n\tpop=[]\n\n\tfor i in range(popSize):\n\t\tgenome=[]\n\t\tfor j in range(genLength):\n\t\t\tparam=random.uniform(rangeMin,rangeMax)\n\t\t\tgenome.append(param)\n\t\tpop.append(Gen(genome)) #add each random genome to the pop\n\t\t\t\t\n\treturn pop", "def get_random_secret_key(cls, size=None):\n if not size:\n size = cls.default_secret_key_size\n return os.urandom(size)", "def generate_prime(size: int) -> int:\n while True:\n num = random.randrange(2 ** (size - 1), 2 ** (size))\n if is_prime(num):\n return num", "def 
generate_population(population_size, member_size):\n population = []\n\n for i in range(population_size):\n population.append(generate_individual(member_size))\n\n return population", "def mutate_chromosome(mutated_genome):\n seed = random.randint(0,5)\n if len(mutated_genome) <= 1: seed = 0\n if seed == 0:\n insert_chromosome(mutated_genome)\n elif seed == 1:\n remove_chromosome(mutated_genome)\n elif seed == 2:\n switch_chromosomes(mutated_genome)\n elif seed == 3:\n shuffle_chromosomes(mutated_genome)\n elif seed == 4:\n increment_chromosome(mutated_genome)\n else: #seed == 5:\n decrement_chromosome(mutated_genome)", "def benchmarkRandomFragment( fasta, size ):\n\n contig, strand, start, end = fasta.getRandomCoordinates( size )\n s = fasta.getSequence( contig, strand, start, end )\n return s", "def randomBitmap(size):\n\n b = bitmap(size)\n xmax, ymax = size\n for x in xrange(xmax):\n for y in xrange(ymax):\n b.set(x, y, random.randint(0,1))\n return b", "def id_generator(cls, size):\n\n return ''.join(random.choice(string.ascii_letters +\n string.digits + '-_') for _ in range(size))", "def remove_chromosome(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n del mutated_genome[index]", "def generate_population(population_size, nn_architecture):\n population = []\n for _ in range(population_size):\n population.append(nn.create_nn_from_arch(nn_architecture))\n\n return population", "def fitness(individual, size, seed=0):\n\n np.random.seed(seed)\n values = individual.dataframe.values.flat\n sample = np.random.choice(values, size=size)\n return min(sample)", "def mock_urandom(size: int) -> bytes:\n if size == 12:\n return b'Mb\\xd5N\\xc2\\xbd\\xa0\\xc8\\xa4L\\xfb\\xa0'\n elif size == 16:\n return b'\\xbb\\xd6\\x87\\xb6j\\xe5\\xdc\\x93\\xb0\\x13\\x1e\\xcc\\x9f\\xf4\\xca\\xab'\n elif size == 32:\n return b'\\x08\\xe0A\\xb6\\xf2\\xb7x\\x8f\\xe5\\xdap\\x87^6x~\\xa4F\\xc4\\xe9\\xb1\\x8a:\\xfbC%S\\x0cZ\\xbb\\xbe\\x88'\n else:\n return os.urandom(size)", "def create_random_sample(random_population, r = 100):\n choose_sample = [choice(random_population) for _ in xrange(r)]\n return choose_sample", "def generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))", "def shuffle_chromosomes(mutated_genome):\n random.shuffle(mutated_genome)", "def stringGen(size, chars=string.ascii_uppercase + string.digits):\n\treturn ''.join(random.choice(chars) for _ in range(size))", "def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)" ]
[ "0.78010154", "0.6555178", "0.6489522", "0.639668", "0.6082051", "0.60245764", "0.5986952", "0.57938874", "0.5732188", "0.57120425", "0.5651485", "0.56411", "0.5639163", "0.5633647", "0.5595927", "0.55193704", "0.5512438", "0.54670936", "0.5464544", "0.54443496", "0.54303193", "0.5412801", "0.54041296", "0.53755575", "0.5372778", "0.5353416", "0.53522515", "0.5346463", "0.5336496", "0.53359115" ]
0.82965225
0
Returns a random Plush genome with size limited by max_genome_size.
def random_plush_genome(self, max_genome_size):
    genome_size = random.randint(1, max_genome_size)
    return self.random_plush_genome_with_size(genome_size)
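The size is drawn uniformly, and ``random.randint`` is inclusive on both ends, so every size from 1 to ``max_genome_size`` can occur. A quick standalone check:

import random

max_genome_size = 8
sizes = [random.randint(1, max_genome_size) for _ in range(1000)]
print(min(sizes), max(sizes))  # always within [1, 8]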
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_plush_genome_with_size(self, genome_size):\n atoms = rand.choice(list(self.atom_generators), size=genome_size)\n return [self.atom_to_plush_gene(atom) for atom in atoms]", "def get_random_genome(self):\n return random.choice(self.genomes)", "def _make_random_genome(evo_config):\n\n # create random genome by creating chromosomes for box size and movement\n return _make_size_dict(evo_config), _make_move_pattern(_make_limb_dict(), evo_config)", "def get_random_sequence(genome):\n \n chr_list = get_chromosome_length(genome)\n \n random_seq = {}\n chr = random.sample(chr_list.keys(),1) #select chromosome\n slen = random.randint(300,1000) #select sequence length\n if chr_list[chr[0]] - slen > 0:\n spos = random.randint(1,chr_list[chr[0]] - slen) #select start position\n \n seq = get_fragment(genome, chr[0], slen, spos)\n if seq.count(\"N\") > 0.1 * slen:\n seq = get_random_sequence(genome)\n else:\n seq = get_random_sequence(genome)\n \n return seq", "def random_push_code(self, max_points):\n max_genome_size = max(int(max_points / 2), 1)\n genome = self.random_plush_genome(max_genome_size)\n return genome_to_program(genome)", "def generate_random_population(pop_size):\n\n random_population = []\n for agent in range(pop_size):\n random_population.append(generate_random_agent_keys())\n return random_population", "def initialPop(popSize,rangeMin,rangeMax,genLength):\n\t\n\tpop=[]\n\n\tfor i in range(popSize):\n\t\tgenome=[]\n\t\tfor j in range(genLength):\n\t\t\tparam=random.uniform(rangeMin,rangeMax)\n\t\t\tgenome.append(param)\n\t\tpop.append(Gen(genome)) #add each random genome to the pop\n\t\t\t\t\n\treturn pop", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def set_generator(random, args):\n representation = args.get('representation')\n indices = list(range(len(representation)))\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size and max_size > 1:\n size = random.randint(1, max_size)\n else:\n size = max_size\n candidate = random.sample(indices, size)\n return sorted(candidate)", "def remove_chromosome(mutated_genome):\n index = random.randint(0,max(0,len(mutated_genome)-1))\n del mutated_genome[index]", "def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)", "def unique_sample_of_int(max,size):\n idxs=set()\n num_left = size - len(idxs)\n while num_left > 0:\n idxs = idxs.union(set(np.random.random_integers(0,max,size=num_left)))\n num_left = size - len(idxs)\n return idxs", "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def simplify_once(genome):\n gn = deepcopy(genome)\n n = randint(1, 4)\n action = choice(['silent', 'noop'])\n if action == 'silent':\n silent_n_random_genes(gn, n)\n else:\n noop_n_random_genes(gn, n)\n return gn", "def generate() -> int:\n return randint(0, 1000000000)", "def create_random_sample(random_population, r = 100):\n choose_sample = [choice(random_population) for _ in xrange(r)]\n return choose_sample", "def _generate_random_population(self, pop_size):\n\n random_population = []\n for agent in range(pop_size):\n random_population.append(self._generate_random_agent())\n return random_population", "def 
choose_random(N):\n db = pymongo.MongoClient('localhost',27020).chembldb\n # Get all CHEMBL IDs\n db.molecules.ensure_index('chembl_id')\n chembl_ids = [m['chembl_id'] for m in db.molecules.find().sort('chembl_id')]\n print len(chembl_ids)\n random.seed(201405291515)\n rands = random.sample(chembl_ids, N)\n return(rands)", "def initial_population(target_im, population_size):\r\n # Empty population of chromosomes accoridng to the population size specified.\r\n init_population = numpy.empty(shape=(population_size, \r\n functools.reduce(operator.mul, target_im)),\r\n dtype=numpy.uint8)\r\n for indv_num in range(population_size):\r\n # Randomly generating initial population chromosomes genes values.\r\n init_population[indv_num, :] = numpy.random.random(\r\n functools.reduce(operator.mul, target_im))*256\r\n return init_population", "def seed_random(max_integer):\n return random.randrange(0,max_integer);", "def generate_random_gene_sequence(gene_pool):\n genes = []\n for j in range(DEFAULT_CHROMOSOME_SIZE):\n genes.append(random.choice(gene_pool))\n\n return genes", "def random_int(max=1000):\r\n return randint(0, max)", "def init_rnd(self):\n\n # query max number of threads\n gennum = apache.AP_MPMQ_MAX_SPARE_THREADS\n # make generators\n # this bit is from Python lib reference\n g = random.Random(time.time())\n result = [g]\n for i in range(gennum - 1):\n laststate = g.getstate()\n g = random.Random()\n g.setstate(laststate)\n g.jumpahead(1000000)\n result.append(g)\n return result", "def random_number(max_number):\n return random.randint(1, max_number)", "def shuffle_chromosomes(mutated_genome):\n random.shuffle(mutated_genome)", "def benchmarkRandomFragment( fasta, size ):\n\n contig, strand, start, end = fasta.getRandomCoordinates( size )\n s = fasta.getSequence( contig, strand, start, end )\n return s", "def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)", "def rand_bytes_range(minlen, maxlen):\n return rand_bytes(random.randint(minlen, maxlen))", "def randomNumber(maxNumber):\n return random.randint(1, maxNumber)" ]
[ "0.753748", "0.7089966", "0.6752933", "0.6277186", "0.6107495", "0.6055105", "0.60515046", "0.602981", "0.5952698", "0.5886196", "0.57833123", "0.57707804", "0.57010764", "0.5688395", "0.566979", "0.5648049", "0.5640551", "0.56156576", "0.55757904", "0.5574941", "0.5560186", "0.5527133", "0.5498734", "0.54828405", "0.54755825", "0.54606545", "0.5430302", "0.5421066", "0.5404706", "0.5404258" ]
0.87570876
0
Returns a random Push expression with size limited by max_points.
def random_push_code(self, max_points):
    max_genome_size = max(int(max_points / 2), 1)
    genome = self.random_plush_genome(max_genome_size)
    return genome_to_program(genome)
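The halving suggests each Plush gene can expand to roughly two Push points (an assumption, not stated here), and the floor of 1 keeps the genome non-empty. A pure-arithmetic check of the budget:

def genome_budget(max_points):
    # Half the point budget, floored at 1 so the genome is never empty.
    return max(int(max_points / 2), 1)

for pts in (0, 1, 2, 10):
    print(pts, "->", genome_budget(pts))  # 0->1, 1->1, 2->1, 10->5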
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_plush_genome(self, max_genome_size):\n genome_size = random.randint(1, max_genome_size)\n return self.random_plush_genome_with_size(genome_size)", "def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)", "def random_plush_genome_with_size(self, genome_size):\n atoms = rand.choice(list(self.atom_generators), size=genome_size)\n return [self.atom_to_plush_gene(atom) for atom in atoms]", "def getRandomPipe():\r\n\r\n\r\n\tpipeHeight = GAME_SPIRTES['pipe'][0].get_height()\r\n\ty2 = offset + random.randrange(0, int(SCREENHEIGHT-GAME_SPIRTES['base'].get_height()) - 1.2*offset)\r\n\ty1 = pipeHeight - y2 +offset\r\n\tpipe = [\r\n\t{'x':pipeX,'y':-y1},\r\n\t{'x':pipeX,'y':y2}\r\n\r\n\t]\r\n\treturn pipe", "def getRandomPipe():\n pipeHeight = GAME_PHOTOS['pipe'][0].get_height()\n offset = SCREEN_HEIGHT/3\n y2 = offset + random.randrange(0, int(SCREEN_HEIGHT - GAME_PHOTOS['base'].get_height() - 1.2 *offset))\n pipeX = SCREEN_WIDTH + 10\n y1 = pipeHeight - y2 + offset\n pipe = [\n {'x': pipeX, 'y': -y1}, #upper Pipe\n {'x': pipeX, 'y': y2} #lower Pipe\n ]\n return pipe", "def random_pipe():\r\n pipe_height = GAME_SPRITES['pipe'][0].get_height()\r\n offset = SCREENHEIGHT/3\r\n position_for_lower_pipe_at_y = random.randrange(0, int(SCREENHEIGHT - GAME_SPRITES['base'].get_height() - 1.2 * offset))\r\n pipe_x = SCREENWIDTH * 10\r\n position_for_upper_pipe_at_y = pipe_height - position_for_lower_pipe_at_y + offset\r\n pipe = [\r\n {'x': pipe_x, 'y': position_for_upper_pipe_at_y},\r\n {'x': pipe_x, 'y': position_for_lower_pipe_at_y}\r\n ]\r\n return pipe", "def create_population(num_schedules, size=10, rate_range=(-6, 0)):\n pop = []\n for _ in range(0, num_schedules):\n exponents = np.random.uniform(rate_range[0], rate_range[1], size)\n schedule = np.power(10, exponents).tolist()\n pop.append(schedule)\n return pop", "def getRandomPipe():\n # y of gap between upper and lower pipe\n gapY = random.randrange(int(BASEY * 0.5), int(BASEY * 0.8))\n\n pipeX = SCREEN_WIDTH - 10\n\n return [{'x': pipeX, 'y': gapY}] # lower pipe", "def generate_packets():\n num_packets = randrange(10)\n temp_packets = []\n for i in range(num_packets):\n temp_packets.append(randrange(1000))\n return temp_packets", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def initialPop(popSize,rangeMin,rangeMax,genLength):\n\t\n\tpop=[]\n\n\tfor i in range(popSize):\n\t\tgenome=[]\n\t\tfor j in range(genLength):\n\t\t\tparam=random.uniform(rangeMin,rangeMax)\n\t\t\tgenome.append(param)\n\t\tpop.append(Gen(genome)) #add each random genome to the pop\n\t\t\t\t\n\treturn pop", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def get_hit_points(min, max):\n return random.randint(min, max)", "def _create_population(problem, algo_options, x0):\n popsize = algo_options.copy().pop(\"popsize\", 1) - 1\n pop = pg.population(\n problem, size=popsize, seed=algo_options.get(\"seed\", DEFAULT_SEED)\n )\n pop.push_back(x0)\n return pop", "def random_five(min_x, max_x):\n return random.sample(xrange(min_x, max_x), 5)", "def generate_pops(target_reg, exclude_regs=[], count=1, allow_dups=True):\n\n random_regs = []\n\n for _ in range(0, count-1):\n random_reg = get_random_register(exclude_regs=exclude_regs)\n\n random_regs.append(random_reg)\n\n pops = ''\n\n for reg 
in random_regs:\n pops += f'pop {reg}; '\n\n pops += f'pop {target_reg}; '\n\n return pops", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)", "def random_points(N, condition=None):\n\n def stream():\n \"\"\" An infinite stream of random points. \"\"\"\n while True:\n yield random_point()\n\n if condition is None:\n # approve unconditionally\n indexed_points = enumerate(stream())\n else:\n indexed_points = enumerate(ifilter(condition, stream()))\n\n points = list(takewhile(lambda (i, point): i < N, indexed_points))\n return (numpy.array([theta for _, (theta, _) in points]),\n numpy.array([phi for _, (_, phi) in points]))", "def set_generator(random, args):\n representation = args.get('representation')\n indices = list(range(len(representation)))\n max_size = args.get('max_size', 9)\n variable_size = args.get('variable_size', True)\n if variable_size and max_size > 1:\n size = random.randint(1, max_size)\n else:\n size = max_size\n candidate = random.sample(indices, size)\n return sorted(candidate)", "def Chose_rand():\r\n total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)", "def quasi_rand(values, feature, parent):\r\n seed = values[0]\r\n base = values[1]\r\n min = values[2]\r\n max = values[3]\r\n \r\n return math.floor(halton(seed, base) * (max - min + 1) + min)", "def generateRandomMask(size, p=0.5):\n mask_array = (np.random.random(size) > p).astype(int)\n mask = sitk.GetImageFromArray(mask_array) \n return mask", "def simulate_x_values(self, minimum = -10, maximum = 10, length = 100):\n return np.sort(np.random.uniform(minimum, maximum, length) )", "def generate_random_data(min_, max_, len_):\n return np.random.uniform(min_, max_, len_)", "def rand(self) -> ZqValue:\n\n return self(randbelow(int(self.q)))", "def _random_x(self):\n return np.random.uniform(-self._extent, self._extent, self._batchsize)", "def random_temp():\n temp_min = 154\n temp_max = 500\n temp_interval = 1\n # `range`s are exclusive [min, max)\n return random.randrange(temp_min, temp_max + 1, temp_interval)", "def __call__(self, shape):\n return np.random.uniform(low=self.minval, high=self.maxval, size=shape)", "def generate_fake_ping_data(random_state, size):\n values = random_state.random_integers(low=5, high=20, size=size)\n picked_low_latency_values_indexes = random_state.choice(\n size, round(0.001 * len(values)), replace=False\n )\n\n # Sets the picked value to a random low ping (e.g.: [100, 200]),\n # and sets the direct close values to a ping between 40 and 80ms\n for index in picked_low_latency_values_indexes:\n if index - 1 >= 0:\n values[index - 1] = random_state.random_integers(40, 80)\n\n values[index] = random_state.random_integers(100, 200)\n\n if index + 1 < size:\n values[index + 1] = random_state.random_integers(40, 80)\n\n return values.tolist()" ]
[ "0.64721805", "0.57280564", "0.5667171", "0.5605492", "0.5529688", "0.5511735", "0.54048395", "0.5329024", "0.5326156", "0.5317445", "0.531234", "0.5264657", "0.51636416", "0.51634496", "0.5159698", "0.5148549", "0.51481014", "0.51468843", "0.51397496", "0.5133279", "0.5130493", "0.51183736", "0.5108177", "0.51057404", "0.50910807", "0.50493973", "0.5042688", "0.5037609", "0.5029573", "0.5025675" ]
0.7029213
0
Return the ground state energies.
def _get_gs_energies(self):
    energy = []
    for ground_state in self._ground_states:
        gs_energy = 0.0
        for key in ground_state["eci"].keys():
            gs_energy += ground_state["eci"][key] * ground_state["cf"][key]
        energy.append(len(ground_state["atoms"]) * gs_energy)
    return energy
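Each ground state's energy is effectively a dot product of effective cluster interactions (ECIs) with correlation functions, scaled by the atom count. A toy example with made-up numbers; the ``atoms`` list stands in for a real atoms object:

ground_state = {
    "eci": {"c0": -1.0, "c1": 0.5},
    "cf":  {"c0":  1.0, "c1": 0.2},
    "atoms": ["Al", "Mg"],  # hypothetical stand-in for an atoms object
}
per_atom = sum(ground_state["eci"][k] * ground_state["cf"][k]
               for k in ground_state["eci"])
print(len(ground_state["atoms"]) * per_atom)  # 2 * (-1.0 + 0.1) = -1.8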
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gEs(self):\n try:\n return self._gEs\n except AttributeError:\n try:\n self._gEs = pd.read_csv(\n os.path.join(self.loc, self.gs_out),\n index_col=0)\n print(\"Reading ground spin states from %s.\" % self.gs_out)\n except OSError:\n self._gEs = self.groundstates_all()\n return self._gEs", "def FindGroundstate(**args):\n\targs[\"imtime\"] = True\n\n\tprop = SetupProblem(**args)\n\n\tfor t in prop.Advance(10):\n\t\tE = prop.GetEnergy()\n\t\tprint \"t = %3.2f, E = %2.8f\" % (t, E)\n\n\treturn prop", "def get_ground_state(self, atoms, **kw):\n from ase.calculators.siesta import Siesta\n\n if \"fdf_arguments\" not in kw.keys():\n kw[\"fdf_arguments\"] = {\"COOP.Write\": True,\n \"WriteDenchar\": True,\n \"XML.Write\": True}\n else:\n for param in [\"COOP.Write\", \"WriteDenchar\", \"XML.Write\"]:\n kw[\"fdf_arguments\"][param] = True\n\n siesta = Siesta(**kw)\n atoms.calc = siesta\n atoms.get_potential_energy()", "def getGameState(self):\n peg1 = ()\n peg2 = ()\n peg3 = ()\n onStatement = Statement()\n onTerm1 = Term('?x')\n onTerm2 = Term('?y')\n onStatement.terms = (onTerm1, onTerm2)\n onStatement.predicate = 'on'\n for fact in self.kb.facts:\n if match(fact.statement, onStatement):\n if fact.statement.terms[0] == Term(Constant('disk1')):\n disk = 1\n elif fact.statement.terms[0] == Term(Constant('disk2')):\n disk = 2\n elif fact.statement.terms[0] == Term(Constant('disk3')):\n disk = 3\n elif fact.statement.terms[0] == Term(Constant('disk4')):\n disk = 4\n elif fact.statement.terms[0] == Term(Constant('disk5')):\n disk = 5\n if fact.statement.terms[1] == Term(Constant('peg1')):\n peg1 = peg1 + (disk,)\n elif fact.statement.terms[1] == Term(Constant('peg2')):\n peg2 = peg2 + (disk,)\n elif fact.statement.terms[1] == Term(Constant('peg3')):\n peg3 = peg3 + (disk,)\n\n peg1 = tuple(sorted(peg1))\n peg2 = tuple(sorted(peg2))\n peg3 = tuple(sorted(peg3))\n result = (peg1, peg2, peg3)\n return result\n ### student code goes here", "def get_ground_states(save_folder,states_index,prefix=''):\n\n states = OrderedDict({})\n states['index'] = states_index\n\n gstates = open(save_folder + prefix + 'results.dat', 'r').readlines()\n\n for line in gstates:\n if 'Ground state energy' in line:\n states['gs0'] = float(line.split()[-2])\n\n ind = gstates.index('Symmetry Energy(eV)\\n')\n states['label'] = []\n states['energy'] = []\n for i in states_index:\n line = gstates[ind+i]\n state,ee = line.split()\n states['label'].append(state)\n states['energy'].append(states['gs0'] + float(ee))\n\n return states", "def groundstates_all(self):\n \n print(\"Calculating ground spin states.\")\n # Collect state energies from files. \n results = [self.get_states(struct) for struct in self.structs]\n \n # Construct dataframe. 
\n headers = np.array(self.states)\n gEs = (\n pd.DataFrame(data=results, index=self.structs, columns=headers))\n \n gEs['Ground State'] = gEs.idxmin(axis=1)\n \n return gEs", "def getExcitonStates(self):\n energies, coefficients = self.force_field.getExcitonStates()\n # check that wavefunctions are orthogonal\n olap = np.dot(coefficients.transpose(), coefficients)\n err = la.norm(olap - np.eye(len(energies)))\n assert err < 1.0e-10, \"exciton wavefunctions not orthogonal, |S - Id|= %e\" % err\n return energies, coefficients", "def getGameState(self):\n ### student code goes here\n onPeg1 = ()\n onPeg2 = ()\n onPeg3 = ()\n\n currentDisk = 1\n while(True):\n currentDiskFound = False\n for fact in self.kb.facts:\n if fact.statement.predicate == \"on\":\n disk = fact.statement.terms[0].term.element\n peg = fact.statement.terms[1].term.element\n diskNumber = int(disk[-1])\n pegNumber = int(peg[-1])\n\n if diskNumber == currentDisk:\n currentDiskFound = True\n\n if pegNumber == 1:\n onPeg1 += tuple([diskNumber])\n elif pegNumber == 2:\n onPeg2 += tuple([diskNumber])\n elif pegNumber == 3:\n onPeg3 += tuple([diskNumber])\n\n currentDisk += 1\n break #breaks out of the for loop\n\n if not currentDiskFound:\n return (onPeg1, onPeg2, onPeg3)", "def get_station_boroughs(self):\\", "def ensemble_perts(self):\n #emean = self.ensemble_mean()\n return self - self.ensemble_mean()\n #return self.state.values", "def _get_state(self):\n\n # stack all variables and return state array\n state = np.hstack((self.sheep_com, self.farthest_sheep, \n self.target, self.dog_pose, self.radius_sheep, \n self.target_distance))\n return state", "def get_e0(fpath):\n f = open(fpath)\n for line in f:\n if 'State #' in line:\n ldat = line.split()\n j = float(ldat[8])\n if j == 0.0:\n return float(ldat[5])\n else:\n raise GroundStateEnergyNotFoundException(\n '\\nA ground state could not be retrieved from %s' % fpath)", "def energy_states(self) -> List[int]:\n return self._energy_states", "def getstate(self):\r\n return SparseGP.getstate(self) + [self.init]", "def test_state(self):\n states = self.geographies.find({ 'geoid': config.SUMLEV_STATE })\n\n self.assertEqual(states.count(), 1)\n\n state = states[0]\n\n self.assertEqual(state['sumlev'], config.SUMLEV_STATE)\n self.assertEqual(state['metadata']['NAME'], 'Hawaii')\n self.assertEqual(state['metadata']['STATE'], '15')\n\n pop_2000 = 1211537\n pop_2010 = 1360301\n self._test_totalpop(state, pop_2000, pop_2010)", "def statee(h):\n # Convert height to SI\n hsi = h*0.3048\n\n # Get data\n zsi, tsi, psi, dsi = statsi(hsi)\n\n # Convert back to English\n z = zsi/0.3048\n t = tsi*1.8\n p = psi*0.02088543\n d = dsi*0.001940320\n\n return z, t, p, d", "def find_dark_states(excited_state, ground_states):", "def x(self):\n # REPLACE THE FOLLOWING WITH THE LOGIC TO CONSTRUCT/RETURN THE STATE\n x = {key: 0.0 for key in self.model.states}\n\n return x", "def get_state(self):\n return self.agents, self.foods, self.viruses, self.masses, self.time", "def _get_obs(self):\n # return np.concatenate((self.world.state[:6], self.world.state[7:13]))\n return np.concatenate((self.world.state, np.zeros(7)))\n # return self.world.state", "def generateState(self, gameState):\n state = [None, None, None, None, None, None]\n\n #Calculamos la distancia al fantasma mas cercano\n distGhosts = gameState.data.ghostDistances\n nearest = 100000\n for i in distGhosts:\n if i < nearest and i is not None:\n nearest = i\n if nearest <= 3:\n state[0] = 0\n elif nearest > 3 and nearest <= 7:\n state[0] = 1\n 
else:\n state[0] = 2\n\n legalActions = gameState.getLegalActions()\n #Calculamos los isPared\n #isParedEast\n if \"East\" in legalActions:\n state[1] = 0\n else:\n state[1] = 1\n\n #isParedWest\n if \"West\" in legalActions:\n state[2] = 0\n else:\n state[2] = 1\n\n #isParedNorth\n if \"North\" in legalActions:\n state[3] = 0\n else:\n state[3] = 1\n\n #isParedShouth\n if \"South\" in legalActions:\n state[4] = 0\n else:\n state[4] = 1\n\n #Bloque que controla si el pacman ha pasado por el hueco encontrado\n controlador =0\n x1,y1 = gameState.getPacmanPosition()\n if len(self.huecos)==2:\n\t\t\tif x1 ==self.huecos[0]:\n\t\t\t\tcontrolador=controlador+1\n\t\t\tif y1 ==self.huecos[1]:\n\t\t\t\tcontrolador=controlador+1\n\t\t\tif controlador == 2:\n\t\t\t\tself.huecos=[]\n\n posGhosts = gameState.getGhostPositions()\n nearestGhostIndex = distGhosts.index(nearest)\n nearestGhostPos = posGhosts[nearestGhostIndex]\n pacmanPosition = gameState.getPacmanPosition()\n\n print(self.huecos)\n if(len(self.huecos) == 2):\n goalPosition = self.huecos\n else:\n goalPosition = nearestGhostPos\n\n legales = []\n for a in self.goodActions(pacmanPosition,goalPosition): #Metodo goodActions devuelve las mejores acciones que puede realizar el pacman para llegar a su objetivo\n if a in legalActions:\n legales.append(a) #Buenas acciones legales\n if (len(legales) < 1):\n if (len(self.goodActions(pacmanPosition,goalPosition))): #Si existen buenas acciones pero no son legales es porque un muro esta impidiendo su ejecucion\n self.huecos = []\n rd= random.randint(0, len(self.goodActions(pacmanPosition,goalPosition))-1)\n self.buscaHueco(gameState,self.goodActions(pacmanPosition,goalPosition)[rd],pacmanPosition[0],pacmanPosition[1]) #Se busca hueco en una de las direcciones de las buenas acciones\n\n #Calculamos la direccion en la que se encuentra el fantasma o el hueco mas cercano\n # esto lo hacemos calculando la distancia del fantasma al pacman y viendo cual es la componente mayor.\n # Si es la X, la direccion sera o derecha o izquierda y dependera del sentido de esta. 
Con la Y parasria lo mismo\n relX = gameState.getPacmanPosition()[0] - goalPosition[0]\n relY = gameState.getPacmanPosition()[1] - goalPosition[1]\n\n if abs(relX) >= abs(relY):\n if relX > 0:\n state[5] = \"West\"\n elif relX < 0:\n state[5] = \"East\"\n elif abs(relY) > abs(relX):\n if relY > 0:\n state[5] = \"South\"\n elif relY < 0:\n state[5] = \"North\"\n\n\n return state", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def getAllWorldStates(self):\n arrays = []\n for i in range(1, self.num_ingredients):\n arrays.append(list(range(7))) #dont forget to change\n return list(itertools.product(*arrays))", "def get_states(self):\n raise NotImplementedError()", "def eval(self, state):\n valueOfPlayers = 0\n valueOfRebelAdvancments = 0\n valueOfLocations = 0\n\n\n\n for coordinate in state.gameState:\n if state.gameState[coordinate]==state.blank:\n continue\n elif state.gameState[coordinate]==state.rebel:\n valueOfRebelAdvancments = -coordinate[0]\n elif state.gameState[coordinate]==state.jedi:\n continue\n elif state.gameState[coordinate]==state.sith:\n continue\n \n valueOfLocations += valueOfRebelAdvancments\n\n \n valueOfPlayers = state.numRebels + 4*state.numJedi - 4*state.numSith\n \n return valueOfPlayers*4 + valueOfLocations", "def energies(self, num_levels=-1):\n if not self.solved: self.solve()\n return self.en[:num_levels]", "def getstate(self):\r\n return GPBase.getstate(self) + [self.Z,\r\n self.num_inducing,\r\n self.has_uncertain_inputs,\r\n self.X_variance]", "def test_state_units_EE(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n assert s.cv.units == \"british_thermal_unit / degree_Rankine / pound\"\n assert s.cp.units == \"british_thermal_unit / degree_Rankine / pound\"\n assert s.s.units == \"british_thermal_unit / degree_Rankine / pound\"\n assert s.h.units == \"british_thermal_unit / pound\"\n assert s.T.units == \"degree_Fahrenheit\"\n assert s.u.units == \"british_thermal_unit / pound\"\n assert s.v.units == \"foot ** 3 / pound\"\n assert s.p.units == \"pound_force_per_square_inch\"", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def ground_state(self) -> numpy.ndarray:\n # Optimal cost\n cost_min = min(self.cost)\n # Find the args that corresponds to the optimal cost\n args = numpy.where(self.cost == cost_min)\n # Create the ideal state\n rho = numpy.zeros(2**self.num_nodes)\n for arg in args[0]:\n rho[arg] = 1\n return rho / numpy.sum(rho)" ]
[ "0.6230123", "0.61784995", "0.6009308", "0.5999785", "0.5996852", "0.59820014", "0.5958661", "0.5931075", "0.5889137", "0.5888036", "0.5803028", "0.576005", "0.57563055", "0.5721198", "0.5711022", "0.5687426", "0.5670511", "0.56288415", "0.56270766", "0.56044376", "0.55956", "0.5595573", "0.5590203", "0.5587449", "0.55870074", "0.55858165", "0.55689216", "0.5564601", "0.55487627", "0.5542581" ]
0.6506218
0
Sets the integration direction.
def _set_integration_direction(self, T0, Tend):
    if Tend is None:
        # Use the default, which is increasing from 0 K
        return
    if T0 > Tend:
        self._integration_direction = "decreasing"
    else:
        self._integration_direction = "increasing"
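A standalone sketch of the same decision table, written as a hypothetical free function rather than the method above:

def integration_direction(T0, Tend, default="increasing"):
    if Tend is None:
        return default  # no end point given: keep the default
    return "decreasing" if T0 > Tend else "increasing"

print(integration_direction(100.0, None))   # increasing (default)
print(integration_direction(800.0, 300.0))  # decreasing
print(integration_direction(300.0, 800.0))  # increasing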
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setDirection(self,stepDir = 2):\n pass", "def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdirection(self, *args, **kwargs)", "def direction(self, direction):\n\n self._direction = direction", "def set_direction(self, new_dir):\n self.__direction = new_dir", "def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]", "def setRobotDirection(self, direction):\n self.direction = direction", "def setRobotDirection(self, direction):\n self.direction = direction", "def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError", "def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError", "def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)", "def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction", "def set_direction(self, direction: int) -> None: \r\n self.direction = direction\r\n if (direction == Directions.turn_left or\r\n direction == Directions.turn_right):\r\n self.stop_timer = time.time() + self.driving_time_turning\r\n else:\r\n self.stop_timer = time.time() + self.driving_time", "def setDirection (self, ra, dec):\n self._response.setDirection(ra, dec)", "def direction(self, direction):\n allowed_values = [\"supports\", \"does_not_support\"] # noqa: E501\n if direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `direction` ({0}), must be one of {1}\" # noqa: E501\n .format(direction, allowed_values)\n )\n\n self._direction = direction", "def dock_direction_set(self, value):\r\n \r\n self._dock_direction = value", "async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE", "def direction(self, direction):\n _api.check_in_list(['horizontal', 'vertical'], direction=direction)\n if hasattr(self, '_direction') and direction != self._direction:\n # remove previous artists\n self._selection_artist.remove()\n if self._interactive:\n self._edge_handles.remove()\n self._direction = direction\n self.new_axes(self.ax)\n if self._interactive:\n self._setup_edge_handles(self._handle_props)\n else:\n self._direction = direction", "def set_dir(self, dir, resistor=None):\n self.IN = mraa.DIR_IN\n self.OUT = mraa.DIR_OUT\n self.PULL_UP = mraa.DIR_OUT_HIGH\n self.PULL_DOWN = mraa.DIR_OUT_LOW\n if dir not in (mraa.DIR_OUT, mraa.DIR_IN):\n # incorrect arguments passed in\n raise Exception(\"Incorrect pin direction dir={}. Use 'gpio.IN' or 'gpio.OUT'\".format(dir))\n elif resistor not in (None, self.PULL_UP, self.PULL_DOWN):\n # incorrect arguments passed in\n raise Exception(\"Incorrect resistor={}. Use 'UP' or 'Down'\".format(resistor))\n elif dir is self.IN:\n self.dir = dir\n self.gpio_pin.dir(self.IN)\n if resistor is not None:\n raise Warning('default', 'Pin dir is {} but should be \\'None\\' when using resistor'.format(dir))\n elif resistor is not None:\n self.resistor = resistor\n self.dir = dir\n # default to only output\n if resistor is self.PULL_UP:\n self.gpio_pin.dir(mraa.DIR_OUT_HIGH)\n else:\n self.gpio_pin.dir(mraa.DIR_OUT_LOW)\n else:\n self.resistor = resistor\n self.dir = dir\n # default to only output\n self.gpio_pin.dir(mraa.DIR_OUT)", "def set_direction(self, direction):\n\n def same_axis(direction1, direction2):\n y_axis = [Direction.Y_POSITIVE, Direction.Y_NEGATIVE]\n x_axis = [Direction.X_POSITIVE, Direction.X_NEGATIVE]\n return ((direction1 in x_axis and direction2 in x_axis)\n or (direction1 in y_axis and direction2 in y_axis))\n\n if direction is None:\n return\n elif not same_axis(self.direction, direction):\n self.direction = direction", "def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction", "def Direction(self, direction):\r\n \r\n self.dock_direction = direction\r\n return self", "def direction(self):\n return self.cfg.direction", "def set_integration(self, integration):\n self.integration = integration\n self.nt = numba_functions.pow2ceil(integration.size)\n self.nf = self.nt // 2\n\n dt = self.info.instrument.sampling_interval.decompose().value\n self.df = 1.0 / (dt * self.nt)\n self.set_channels(integration.channels)", "def set_port_direction(self, port, direction):\n\n if port == 1:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, direction)\n self.__port_b_direction = direction\n else:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, direction)\n self.__port_a_direction = direction\n return", "def setOrientation(self, direction=None, up=None):\n if direction is None: # Use current direction\n direction = self.direction\n else:\n assert len(direction) == 3\n direction = numpy.array(direction, copy=True, dtype=numpy.float32)\n direction /= numpy.linalg.norm(direction)\n\n if up is None: # Use current up\n up = self.up\n else:\n assert len(up) == 3\n up = numpy.array(up, copy=True, dtype=numpy.float32)\n\n # Update side and up to make sure they are perpendicular and normalized\n side = numpy.cross(direction, up)\n sidenormal = numpy.linalg.norm(side)\n if sidenormal == 0.:\n raise RuntimeError('direction and up vectors are parallel.')\n # Alternative: when one of the input parameter is None, it is\n # possible to guess correct vectors using previous direction and up\n side /= sidenormal\n up = numpy.cross(side, direction)\n up /= numpy.linalg.norm(up)\n\n self._side = side\n self._up = up\n self._direction = direction\n self.notify()", "def move(self, direction):\r\n self.stored_direction = direction", "def shiftDir(self, direction, n):\n assert Direction.isDir(direction), \"incorrect type of arg direction: should be a Direction, is {}\".format(type(direction))\n assert isinstance(n, AxisDistance), 'incorrect type of arg n: should be type AxisDistance, is type {}'.format(type(n))\n direction = Direction(direction)\n self.x += direction.dx * n\n self.y += direction.dy * n\n return self", "def set_direction(self, right_or_left):\r\n if right_or_left == \"r\":\r\n self.__direction = self.__direction - 7\r\n elif right_or_left == \"l\":\r\n self.__direction = self.__direction + 7", "def steer(self, direction):\n\n if -1 <= direction <= 1:\n target_position = self.steering_limit * direction\n self.brick_pi.set_motor_position(\n self.motor_steer, -target_position)", "def integration_setting(self, integration_setting):\n\n self._integration_setting = integration_setting" ]
[ "0.7542952", "0.7146989", "0.69682", "0.69272983", "0.69205153", "0.68018293", "0.68018293", "0.67248225", "0.67248225", "0.6560766", "0.649372", "0.6449591", "0.6381216", "0.6296564", "0.6294324", "0.6231282", "0.61218536", "0.60626364", "0.60610044", "0.6020907", "0.6015768", "0.60032713", "0.59702855", "0.5968518", "0.59585166", "0.59240943", "0.58851874", "0.58682114", "0.5836824", "0.5784962" ]
0.76403767
0
Returns true if we reached the temperature end point.
def _reached_temperature_end_point(self, T, Tend): if Tend is None: # End point not give return False if self._integration_direction == "increasing": if T > Tend: return True elif self._integration_direction == "decreasing": if T < Tend: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_done(self):\n return True if self.t >= self.max_ep_len else False", "def if_end(self, **kwargs):\n\n index = self.get('_index')\n\n if index and index >= len(self.steps)-1:\n return True # all steps have been used\n\n return False", "def isFinished(self):\n current = self.robot.drivetrain.get_gyro_angle()\n # If abs(target - current) < threshold then return true\n return math.fabs(self._target_degrees - current) <= self._degree_threshold or self.isTimedOut()", "def has_end(self):\n return bool(self._end)", "def _is_at_end(self):\n return self.current >= len(self.source)", "def has_ended(self):\r\n if self.end is None:\r\n return False\r\n\r\n return datetime.now(UTC()) > self.end", "def _termination(self):\n if self._never_terminate:\n return False\n\n if self._counter >= self._max_steps:\n return True\n\n return self.is_fallen() # terminates automatically when in fallen state", "def reached(self) -> bool:\n return (time.time() - self._start) >= self.seconds", "def _is_at_end(self):\n return self._peek().token_type == scanner.TokenType.EOF", "def end_of_epoch(self):\n return not self._cur_epoch_itr.has_next()", "def check_end(self):\n return [self.x, self.y] == self.end_pos", "def _is_end(self, symbol):\n if symbol.id == self.scanner.END_ID:\n return True\n else:\n return False", "def is_train_test_ended(self) -> bool:\n if self._unit is not None:\n return self._unit.is_train_test_ended()\n else:\n return False", "def isendofheated(self,lag):\n kmax = self.n\n v1 = self.v1\n v2 = self.v2\n for k in range(kmax-1):\n if lag[k+1]>=(v2+v1)/(v2-v1) * lag[k]:\n return False\n return True", "def is_finish(self,location):\n return location[0] == self.columns - 1 and location[1] == self.rows - 1", "def isFinished(self) -> bool:\n\n # Need to convert distance travelled to degrees. The Standard\n # Romi Chassis found here, https://www.pololu.com/category/203/romi-chassis-kits,\n # has a wheel placement diameter (149 mm) - width of the wheel (8 mm) = 141 mm\n # or 5.551 inches. We then take into consideration the width of the tires.\n inchPerDegree = math.pi * 5.551 / 360.0\n\n # Compare distance travelled from start to distance based on degree turn\n return self._getAverageTurningDistance() >= inchPerDegree * self.degrees", "def has_next(self) -> bool:\n return (self._high - self._low) > self._tol", "def update_temperature(self):\n if self.T < self.Tmin:\n return False\n self.T -= self.alpha\n\n return True", "def ended(self):\n return self.dur <= 0", "def is_halted(self):\n\t\treturn self.pos == -1", "def has_next(self) -> bool:\n return self._high is None or self._high - self._low > self._tol", "def isFinished(self):\n\n currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)\n if currentValue == 0:\n return True\n\n # It can be more than one line for the previous alignment value.\n # We iterate until we find a better value or to the end of the lines.\n for i in self:\n while i.nextLine[self.idx] > currentValue and not i.isFinished:\n i.next();\n \n return not any(i.nextLine[self.idx] <= currentValue for i in self)", "def __bool__(self):\n return self.end < len(self.data)", "def is_high_temp(self):\n status = self.get_status_response()\n return ((status[1] & 0x20) == 0x20)\n #end is_power_limited()", "def isFinish(self):\n return self.finish", "def reached_dest(self) -> bool:\n return self.base_route[-1] == self.traveled_nodes[-1][self.NODE_INDEX]", "def is_eof(self) -> bool:\n ...", "def is_simulation_finished(self):\n if self.config.max_time is None:\n return False\n return self.time_is_out() or self.all_customers_in_destination()", "def atTail(self):\n return self.cursor == self.tail", "def has_happened(self):\n\n return self.end < timezone.now()" ]
[ "0.68749845", "0.6486663", "0.64634204", "0.640316", "0.6394104", "0.6357551", "0.6345655", "0.63410616", "0.6326322", "0.63182914", "0.62896657", "0.62468636", "0.6238003", "0.6207513", "0.62044257", "0.6198576", "0.6184775", "0.61823577", "0.6150872", "0.61056906", "0.6073026", "0.60708845", "0.6069689", "0.6043219", "0.60428214", "0.6013263", "0.6010061", "0.59703237", "0.593869", "0.59345496" ]
0.83553135
0
Stores backup data to hdf5 file
def _backup(self, data, dsetname="data"): with h5.File(self._backupfile, 'a') as hfile: grp = hfile.create_group( dsetname + "{}".format( self._current_backup_indx)) for key, value in data.items(): if value is None: continue if key == "images": for img_num, img in enumerate(value): if img is None: continue #img = img.T dset = grp.create_dataset( "img_{}".format(img_num), data=img) dset.attrs['CLASS'] = "IMAGE" dset.attrs['IMAGE_VERSION'] = '1.2' dset.attrs['IMAGE_SUBCLASS'] = 'IMAGE_INDEXED' dset.attrs['IMAGE_MINMAXRANGE'] = np.array( [0, 255], dtype=np.uint8) else: grp.create_dataset(key, data=value) self._current_backup_indx += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_as_hdf5(self, filename):", "def save_backup(\n self):\n self.backup = self.data", "def to_hdf5(self, filepath, **kwargs):\n hdf = pd.HDFStore(filepath, **kwargs)\n hdf.put(self.INDEXDATAFRAME, self.df, format='fixed', data_columns=True)\n hdf.close()", "def save_h5_file(self, result_dict, loc_id):\n if self.storage_type == 's3':\n file_path = '{0}/{1}_combined.h5'.format(\n self.output_path,\n loc_id\n )\n hf_result = h5py.File(file_path, 'w')\n for k, v in result_dict.items():\n hf_result.create_dataset(k, data=v)\n hf_result.close()\n else:\n file_path = os.path.join(self.output_path, '{0}_combined.h5'.format(loc_id))\n hf_result = h5py.File(file_path, 'w')\n for k, v in result_dict.items():\n hf_result.create_dataset(k, data=v)\n hf_result.close()\n return file_path", "def SaveStackH5(self):\n\n try: \n wildcard = \"HDF5 files (*.hdf5)|*.hdf5\"\n dialog = wx.FileDialog(None, \"Save as .hdf5\", wildcard=wildcard,\n style=wx.SAVE|wx.OVERWRITE_PROMPT)\n\n if dialog.ShowModal() == wx.ID_OK:\n filepath = dialog.GetPath()\n self.page1.filename = dialog.GetFilename()\n dir = dialog.GetDirectory()\n \n self.common.path = dir\n self.common.filename = self.page1.filename\n\n wx.BeginBusyCursor() \n self.stk.write_h5(filepath, self.data_struct) \n wx.EndBusyCursor() \n\n except:\n\n wx.EndBusyCursor()\n wx.MessageBox(\"Could not save HDF5 file.\")\n \n dialog.Destroy()\n self.refresh_widgets()\n \n return", "def save_hdf5(self, filename):\n filename += '.h5'\n try:\n hf = h5py.File(filename, 'w')\n hf.create_dataset('Array', data=self.flat_array)\n hf.close()\n except TypeError as err:\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('TypeError [{}] when attempting to save HDF5'.format(err))\n else:\n print('TypeError [{}] when attempting to save HDF5'.format(err))", "def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)", "def _update_hdf5_file(self, field_name, saveformat, data, timestep, t):\n assert saveformat == \"hdf5\"\n fullname, metadata = self._get_datafile_name(field_name, saveformat, timestep)\n\n # Create \"good enough\" hash. This is done to avoid data corruption when restarted from\n # different number of processes, different distribution or different function space\n local_hash = sha1()\n local_hash.update(str(data.function_space().mesh().num_cells()))\n local_hash.update(str(data.function_space().ufl_element()))\n local_hash.update(str(data.function_space().dim()))\n local_hash.update(str(MPI.size(mpi_comm_world())))\n\n # Global hash (same on all processes), 10 digits long\n global_hash = MPI.sum(mpi_comm_world(), int(local_hash.hexdigest(), 16))\n global_hash = str(int(global_hash%1e10)).zfill(10)\n\n #key = (field_name, saveformat)\n #datafile = self._datafile_cache.get(key)\n #if datafile is None:\n # datafile = HDF5File(mpi_comm_world(), fullname, 'w')\n # self._datafile_cache[key] = datafile\n\n # Open HDF5File\n if not os.path.isfile(fullname):\n datafile = HDF5File(mpi_comm_world(), fullname, 'w')\n else:\n datafile = HDF5File(mpi_comm_world(), fullname, 'a')\n\n # Write to hash-dataset if not yet done\n if not datafile.has_dataset(global_hash) or not datafile.has_dataset(global_hash+\"/\"+field_name):\n datafile.write(data, str(global_hash)+\"/\"+field_name)\n\n if not datafile.has_dataset(\"Mesh\"):\n datafile.write(data.function_space().mesh(), \"Mesh\")\n\n # Write vector to file\n # TODO: Link vector when function has been written to hash\n datafile.write(data.vector(), field_name+str(timestep)+\"/vector\")\n\n # HDF5File.close is broken in 1.4\n if dolfin_version() == \"1.4.0+\":\n datafile.close()\n del datafile\n # Link information about function space from hash-dataset\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/x_cell_dofs\", field_name+str(timestep)+\"/x_cell_dofs\")\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/cell_dofs\", field_name+str(timestep)+\"/cell_dofs\")\n hdf5_link(fullname, str(global_hash)+\"/\"+field_name+\"/cells\", field_name+str(timestep)+\"/cells\")\n\n return metadata", "def convert(self, out_path: str)->None:\n tape_data_hdf5 = self.createTapeHDF5Dict()\n \n self.deleteFile(out_path)\n self.to_hdf5(tape_data_hdf5, out_path)\n print(\"HDF5 file has been successfully saved at {}\".format(out_path))", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def dump_blob(self, data):\n path = self._objectpath(data.uuid)\n with pd.HDFStore(self.rootpath, mode=\"a\") as hdf:\n if not isinstance(data.df, pd.DataFrame):\n df = pd.DataFrame()\n else:\n df = data.df\n logger.debug(f\"store {path}/metadata\")\n hdf.put(f'{path}/metadata', data.metadata.df, format='fixed', data_columns=True)\n hdf.put(f'{path}/table', df, format='fixed', data_columns=True)\n hdf.put(f'{path}/description', data.description_to_df(), format='fixed',\n data_columns=True)\n return path", "def save_to_hd5(out_file, x_train, y_train, x_val, y_val, x_test, y_test):\n data = h5py.File(out_file, \"w\")\n train_data = data.create_group(\"train_data\")\n train_data.create_dataset(\"x_train\", data=x_train)\n train_data.create_dataset(\"y_train\", data=y_train)\n if x_val is not None:\n val_data = data.create_group(\"val_data\")\n val_data.create_dataset(\"x_val\", data=x_val)\n val_data.create_dataset(\"y_val\", data=y_val)\n if x_test is not None:\n test_data = data.create_group(\"test_data\")\n test_data.create_dataset(\"x_test\", data=x_test)\n test_data.create_dataset(\"y_test\", data=y_test)\n data.close()", "def SaveResultsToH5(self):\n\n try: \n wildcard = \"HDF5 files (*.hdf5)|*.hdf5\"\n dialog = wx.FileDialog(None, \"Save as .hdf5\", wildcard=wildcard,\n style=wx.SAVE|wx.OVERWRITE_PROMPT)\n\n if dialog.ShowModal() == wx.ID_OK:\n filepath = dialog.GetPath()\n self.page1.filename = dialog.GetFilename()\n dir = dialog.GetDirectory()\n \n self.common.path = dir\n self.common.filename = self.page1.filename\n\n wx.BeginBusyCursor() \n self.stk.write_results_h5(filepath, self.data_struct, self.anlz) \n wx.EndBusyCursor() \n\n except:\n\n wx.EndBusyCursor()\n wx.MessageBox(\"Could not save HDF5 file.\")\n \n dialog.Destroy()\n self.refresh_widgets()\n \n return", "def store(self, filename):", "def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n \n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')", "def store_sequence(sequence: list) -> None:\n ensure_data_folder_existence()\n file_name = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\n\n file_loc = '{}/{}.h5'.format(params.DATA_FOLDER_NAME, file_name)\n\n print(\"Storing h5py\")\n h5f = h5py.File(file_loc, 'w')\n h5f.create_dataset('sequence', data=sequence)\n h5f.close()\n print(\"H5 stored\")", "def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = self.xyz\n f.close()\n\n return", "def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return", "def write_data_to_h5(data, filename):\n f = h5py.File(filename, 'w', libver='latest')\n dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)\n f.close()", "def save_to_hdf(df, fname, output_subdir=None):\n path = Path(fname)\n newfname = path.with_suffix('.h5').name\n folderpath = HOME / 'output'\n if output_subdir:\n folderpath = folderpath / output_subdir\n path = folderpath / newfname\n df.to_hdf(str(path), 'df', format='t')\n return str(path)", "def save(self,file_path):\n hf = h5py.File(file_path, 'w')\n hf.attrs[\"annotations\"] = str(self.annotations)\n f=hf.create_group(\"funds\")\n for i,fund in enumerate(self.funds_set):\n grp = f.create_group(\"fund_{0}\".format(i))\n grp.create_dataset(\"data\",data=fund.__getstate__()[\"data\"])\n grp[\"params\"]=str(fund.__getstate__()[\"params\"])\n g=hf.create_group(\"portfolios\")\n for i,portfolio in enumerate(self.portfolios_set):\n grp = g.create_group(\"portfolio_{0}\".format(i))\n grp.attrs[\"capital\"]=portfolio.__getstate__()[\"capital\"]\n grp.create_dataset(\"commitments\",data=portfolio.__getstate__()[\"commitments\"])\n grp.create_dataset(\"vintages\", data=portfolio.__getstate__()[\"vintages\"])\n funds=[hash(portfolio.funds[i]) for i in range(len(portfolio))]\n grp.create_dataset(\"funds\", data=np.array(funds,dtype=h5py.string_dtype(encoding='utf-8')))\n hf.close()", "def save_to_h5(data_dict, save_path, overwrite=False, dlen=32):\n h5file = h5py.File(save_path, 'a')\n good, dup_list = _check_h5_r(data_dict, h5file, overwrite)\n if good:\n if len(dup_list) > 0:\n logger.warning(f\"{dup_list} already found in {save_path}. Overwriting...\")\n _save_h5_r(data_dict, h5file, dlen)\n logger.info(f\"Saved data to {save_path}\")\n else:\n logger.warning(f\"{dup_list} already found in {save_path}. Save to file canceled. \" \\\n \"Please set `overwrite=True` or specify a different file path.\")\n h5file.close()", "def write2hdf5(filename, dict2store, compression=\"lzf\"):\n\twith h5py.File(filename,'w') as hf:\n\t\tfor key,value in dict2store.iteritems():\n\t\t\thf.create_dataset(key, data=value,compression=compression)", "def store_hdf5(images, labels, ID:str, path:str = \"data/dataset/\"):\n\n #create a new HDF5 file\n file = h5py.File(path+ID+\"_lens.h5\", \"w\")\n\n #create a dataset in the file\n dataset = file.create_dataset( \"images\", np.shape(images), h5py.h5t.IEEE_F64BE, data=images)\n file.close()\n\n labels.to_hdf(path +ID+'_meta.h5', \"table\")", "def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)", "def write_h5(fname: str, data: dict) -> None:\n try:\n with h5py.File(fname, 'w') as f:\n recursively_save_dict_contents_to_group(f,'/',data)\n except IOError as e:\n print(f\"Cannot write HDF5 file {fname}\")\n print(f\"IOError: {e}\")", "def store_hdf_h5py(self, compression_name, **compression_options):\n\n types = [('counter', uint8), ('timestamp', uint64),\n ('acceleration', uint16)]\n number_lines = len(self.values)\n data = recarray(number_lines, dtype=types)\n data['counter'] = asarray(self.counters)\n data['timestamp'] = asarray(self.timestamps)\n data['acceleration'] = asarray(self.acceleration)\n\n filepath = self.filepath.with_name(\n f\"{self.filepath.stem} h5py {compression_name}\").with_suffix(\n \".hdf5\")\n with File(filepath, 'w') as hdf:\n hdf.create_dataset(\"acceleration\",\n data=data,\n shape=(number_lines, ),\n **compression_options)", "def save_frame_to_hdf5_file(fsrc, key = 'images', compression = 0):\n preparation = \"\"\n preparation += \"from h5py import File;\"\n preparation += \"from tempfile import gettempdir;\"\n preparation += \"import os;\"\n preparation += \"root = gettempdir()\"\n preparation += \"filename_dst = os.path.join(root,'test_destination.hdf5')\"\n preparation += \"filename_dst = os.path.join(root,'test_destination.hdf5')\"\n testcode = ''", "def export_hdf(dataset_id, df):\n\n df.to_hdf(\n f\"{PROCESSED_DIR}/{dataset_id}.h5\",\n        key=dataset_id,\n complevel=COMPLEVEL,\n complib=COMPLIB,\n mode=\"w\",\n )", "def create_output_file(self):\n if self.options['storage_method'] == 'hdf5':\n try:\n fp = h5py.File(self.file_name, \"w\")\n except IOError:\n print \"Unable to open output file '%s'\" % self.file_name\n sys.exit(1)\n # remember file pointer\n self.file_pointer = fp\n print \"Creating file '%s'\" % self.file_name\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"create_file\", self.file_name))" ]
[ "0.73253554", "0.6622011", "0.6496483", "0.64814097", "0.647887", "0.6392372", "0.63609046", "0.63100886", "0.62743527", "0.6262256", "0.6217909", "0.6199056", "0.61985654", "0.6194896", "0.61822635", "0.61728513", "0.6162515", "0.6148715", "0.6139941", "0.6126298", "0.6107426", "0.60834014", "0.60812706", "0.60620564", "0.60564655", "0.6047628", "0.60379785", "0.6037978", "0.602084", "0.59971565" ]
0.6652448
1
Check if composition changes too much from one step to another
def _system_changed_phase(self, prev_comp, comp): return np.abs(prev_comp - comp) > self._max_singlet_change
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_can_evolve(self):\n # This sounds similar to generate actions\n pass", "def converged(self) -> bool:", "def converged(self) -> bool:", "def converged(self) -> bool:", "def _compositions_swapped(self, thermo):\n assert self._ref_indicators is not None\n\n indicators = self._singlet_comparison(thermo)\n\n for list1, list2 in zip(indicators, self._ref_indicators):\n comp_swapped = True\n for ind1, ind2 in zip(list1, list2):\n if ind1 == ind2:\n comp_swapped = False\n if comp_swapped:\n return True\n return False", "def violated(self) -> bool:\n ...", "def check_collisions(self):", "def passes(self) -> bool:\n ...", "def _ftolCheck(self):\n oldLoss = biggestRecentLoss(self.loss, self.lookback)\n newLoss = float(self.loss[-1])\n fracDiff = 2 * (oldLoss - newLoss)/(oldLoss + newLoss)\n \n if fracDiff < self.ftol:\n \n self.converged = True", "def fusable(self) -> bool:\n if not self._pre_check() or not self.has_crossing_len2_ob():\n return False\n new_tiling = self._tiling.add_obstructions(self.obstructions_to_add())\n\n return (\n self._tiling == new_tiling\n and self._check_isolation_level()\n and all(\n self._can_component_fuse_assumption(assumption)\n for assumption in self._tiling.assumptions\n )\n )", "def is_equivalence(self) -> bool:", "def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False", "def test_convergence(self, time_step):\n \n ##compare the average episode length between two loop\n if self.past_episode == time_step:\n self.convergence = True\n else:\n self.convergence = False", "def check(self, context):\n self.set_prompts_from_properties()\n self.product.obj_x.location.x = self.width\n self.product.obj_y.location.y = -self.depth\n props_closet.update_render_materials(self, context)\n # self.update_product_size()\n return True", "def check(self, context):\n self.set_prompts_from_properties()\n self.product.obj_x.location.x = self.width\n self.product.obj_y.location.y = -self.depth\n props_closet.update_render_materials(self, context)\n # self.update_product_size()\n return True", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def KeepAdvancingSolutionLoop(self):\n return self.step < self.nsteps", "def test_update_composition(self):\n pass", "def reached_goal(state):\n return any(map(completely_removed, state['sliders']))", "def checkSolution(self):\n movesToEndblock = self.gridSize - self.changeable[0] - 2\n if self.checkMove(0,movesToEndblock) == 0:\n return 0\n return 1", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def guard_liberate_transition(self):\n if self.get_free_positions:\n return True", "def _check_for_ko(self):\n try:\n if self._array == self._history[-2][0]:\n self._pop_history()\n raise BoardError('Cannot make a move that is redundant!')\n except IndexError:\n # Insufficient history...let this one slide\n pass", "def was_used(self):\r\n return self.circ_chosen != 0", "def check_change(self, state_variables):\n for control in self.__control_list:\n if control[0] != 'control':\n\t\t\t\t# sum of values of state variables of interest in the previous and the current interval of time\n sum1 = np.matmul(control[1], state_variables[:,0])\n sum2 = np.matmul(control[1], state_variables[:,1])\n\n if (np.sign(sum1 - control[2]) != np.sign(sum2 - control[2])):\n self.__active_control = control\n return True\t\n return False", "def isPossibleSubsumer(self):\n if self.action_cnt > cons.theta_sub and self.error < cons.err_sub: #self.prediction < cons.err_sub: (why does it work?)\n return True\n return False", "def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True", "def check_evolve(self):\n if self.team == 'white':\n if self.position[0] == 0:\n self.evolve()\n \n else:\n if self.position[0] == 7:\n self.evolve()", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def computed(cls, pdb_object):\n return cls.name in pdb_object.completed_steps" ]
[ "0.6158223", "0.609395", "0.609395", "0.609395", "0.60568446", "0.6024883", "0.5987834", "0.5795023", "0.574844", "0.5735461", "0.57305664", "0.5709299", "0.5709299", "0.5688929", "0.5688929", "0.56603444", "0.5645165", "0.5631118", "0.562156", "0.5614584", "0.560819", "0.559211", "0.55857044", "0.5582541", "0.557462", "0.5573404", "0.5563148", "0.55533975", "0.5550189", "0.553547" ]
0.61296654
1
Return array of the singlet terms
def _get_singlet_array(self, thermo): singlets = [] for entry in thermo: singlets.append([entry[get_singlet_name(name)] for name in self._singlet_names]) return singlets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def terms(self):\n return self._terms", "def get_terms(document):\n q = get_mapped(document)\n tokens = tockenizer(q)\n terms = analizer(tokens)\n\n return terms", "def terms(self) -> Tuple[Term, ...]:\n ...", "def get_all_terms(self):\n return self.term.all()", "def get_search_terms(self):\n params = self.request.QUERY_PARAMS.get(\"search\", \"\")\n return params.replace(\",\", \" \").split()", "def get_terms(self):\n return json.loads(self.terms)", "def to_terms(self, sentence: str) -> List[int]:\n tokens = self._tokenize(sentence)\n ret = self._tokens_to_terms(tokens)\n return ret", "def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names", "def termnames(self):\n\n names = []\n for term in self.terms:\n names += [term.termname]\n return names", "def _flow_terms(flow):\n return flow.synonyms", "def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))", "def terms(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"terms\")", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def names(self):\n\n allnames = []\n for term in self.terms:\n allnames += term.names()\n return allnames", "def variables(self):\n return [term.variable for term in self.terms]", "def valence_terms(cls, topology):\n return [tuple(b.atoms) for b in topology.bonds]", "def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []", "def signature(cls) -> List[Term]:\n el = []\n for term in cls.__dict__.values():\n if not isinstance(term, (Constant, Function)):\n continue\n el.append(deepcopy(term))\n return el", "def generate_input(s_terms):\n qm = QuineMcCluskey()\n res = set()\n if len(s_terms) == 0:\n return res\n for term in s_terms:\n res = res | set([i for i in qm.permutations(term)])\n return res", "def get_all(cls):\n\n words = db.session.query(Term.word).all()\n\n return [word[0] for word in words]", "def terms(f):\n return dmp_terms(f.rep, f.lev, f.dom)" ]
[ "0.69178885", "0.69178885", "0.69178885", "0.69178885", "0.69178885", "0.69178885", "0.69178885", "0.69178885", "0.69178885", "0.67557126", "0.66699016", "0.6567775", "0.63677454", "0.6355416", "0.6226497", "0.6089282", "0.6078308", "0.6078308", "0.6032249", "0.60092956", "0.59885263", "0.59667826", "0.59667826", "0.596079", "0.5920294", "0.59188575", "0.5907947", "0.5892962", "0.58780456", "0.58751845" ]
0.6973103
0
Check if one of the systems changed phase
def _one_system_changed_phase(self, thermo, ref_values): singlet_array = self._get_singlet_array(thermo) for cur_array, ref_array in zip(singlet_array, ref_values): for cur_val, ref_val in zip(cur_array, ref_array): if self._system_changed_phase(cur_val, ref_val): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _system_changed_phase(self, prev_comp, comp):\n return np.abs(prev_comp - comp) > self._max_singlet_change", "def has_state_changed(self) -> bool:\r\n ...", "def check_device_state(self):", "def check_change(self, state_variables):\n for control in self.__control_list:\n if control[0] != 'control':\n\t\t\t\t# sum of values of state variables of interest in the previous and the current interval of time\n sum1 = np.matmul(control[1], state_variables[:,0])\n sum2 = np.matmul(control[1], state_variables[:,1])\n\n if (np.sign(sum1 - control[2]) != np.sign(sum2 - control[2])):\n self.__active_control = control\n return True\t\n return False", "def check_unstaged_changes(self):\n pass", "def check_device_changes(self):\n\n #---------------------------------------------------------------------------\n # USB ports\n current_serial_devices = self.enumerate_serial_devices()\n\n for device in self.old_serial_devices:\n if device not in current_serial_devices:\n print(\"Removed USB port: \", device)\n self.removed_serial_devices.append(device)\n\n self.arduino_change_signal.emit('OFF')\n\n for device in current_serial_devices:\n if device not in self.old_serial_devices:\n print(\"Added USB port: \", device)\n self.added_serial_devices.append(device)\n\n self.arduino_change_signal.emit('ON')\n\n self.old_serial_devices = current_serial_devices\n\n #---------------------------------------------------------------------------\n # MIDI port detection\n current_midi_devices = self.enumerate_midi_devices()\n\n for device in self.old_midi_devices:\n if device not in current_midi_devices:\n print(\"Removed MIDI port: \", device)\n self.removed_midi_devices.append(device)\n\n self.piano_change_signal.emit('OFF')\n\n for device in current_midi_devices:\n if device not in self.old_midi_devices:\n print(\"Added MIDI port: \", device)\n self.added_midi_devices.append(device)\n\n self.piano_change_signal.emit('ON')\n\n self.old_midi_devices = current_midi_devices", "def has_changed(self):\n return self.get_old_value() != self.get_current_value()", "def detect_paramchange(self,t_final):\n id1 = np.searchsorted(self.shift_times,t_final)-1\n if id1 != self.current_region:\n return True\n else:\n return False", "def check_state(self):\n pass", "def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()", "def already_processed(self):\n # If the flag file has been created by a previous run\n # or if any of the rules have already been re-ordered\n # then we shouldn't make any more changes and instead\n # the system needs to be rebooted.\n return self.syspaths.flag_exists", "def get_changed() -> bool:\n return g.ledger.changed()", "def needs_update(self, system, environment_input):\n pass", "def phase_check(self, num, line):\n\t\tpass", "def probe(self):\n return False", "def has_unapplied_change(self):\n for name in self.params_to_display.keys():\n if self._tkvar_changed(name):\n return True\n return False", "def check_lighting_state_room2():\n if timer_lights_on_off_room2() == room2_lux():\n pass\n else:\n light_room2(timer_lights_on_off_room1())", "def checkChanges(self):\n results = [\n self.values[1],\n self.values[f\"-{self.values[1]}-\"],\n self.values[\"-TOGGLE-ALL-\"],\n self.values[\"-INVITED-\"],\n self.values[\"-ASSIGNED-\"],\n self.values[\"-GRADED-\"],\n self.values[\"-BLOCKED-\"] ]\n\n if results == self.oldResults[1::]:\n self.oldResults = [False] + results\n\n elif (self.values[f\"-{self.values[1]}-\"] == [] and \\\n self.values[\"-TOGGLE-ALL-\"] == False and \\\n results[0] != self.oldResults[1]):\n self.window['-OUTPUT-'].update('')\n self.oldResults = [False] + results\n\n else:\n self.oldResults = [True] + results", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def isChanged(self, p_int): # real signature unknown; restored from __doc__\n return False", "def is_changed(self) -> bool:\n return self.selected_vms != self._initial_vms", "def check_lighting_state_room1():\n if timer_lights_on_off_room1() == room1_lux():\n pass\n else:\n light_room1(timer_lights_on_off_room1())", "def changed(self):\n return True", "def has_state_changed(self):\n return bool(RPR.AudioAccessorValidateState(self.id))", "def has_changed(self):\n return bool(self.changed_data)", "def hadChanged(self):\n return self.changed", "def check_flag(self):\n flag = 0\n if self.new_st_name:\n flag = 1\n elif self.ports and self.pt_state is not None:\n flag = 1\n elif self.initiators and self.ini_state is not None:\n flag = 1\n elif self.virvols and self.virvol_state is not None:\n flag = 1\n return flag", "def changed(self):\n\t\tpass", "def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True", "def _is_done_illegal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) < servers_used_mem)" ]
[ "0.758044", "0.6362879", "0.61289746", "0.59745437", "0.5927031", "0.5877956", "0.58395505", "0.5811819", "0.58104575", "0.5734162", "0.57106453", "0.5701636", "0.5695618", "0.568467", "0.5654003", "0.56400806", "0.5624871", "0.56106794", "0.5610273", "0.56056416", "0.55995566", "0.55938417", "0.5586867", "0.5577304", "0.5564605", "0.55605465", "0.55453515", "0.5542361", "0.5522061", "0.54939073" ]
0.7447519
1
Checks if the predicted and the computed values match
def _prediction_match(self, thermo, ref_values, eps=0.05): singlet_array = self._get_singlet_array(thermo) for cur_array, ref_array in zip(singlet_array, ref_values): for cur_val, ref_val in zip(cur_array, ref_array): if abs(cur_val - ref_val) > eps: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_prediction(self):\n predicted_scores = self.sess.run(self.NET.output_with_relu, feed_dict={self.NET.input: self.test_image if len(self.test_image.shape)==4 else [self.test_image]})\n self.original_confidence = np.max(predicted_scores)\n if np.argmax(predicted_scores,1) != self.original_label:\n print(\"Network's Prediction is Already Incorrect!\")\n return True\n else:\n return False", "def CheckWrong(predicted, correct):\n for prediction in predicted:\n if prediction != correct:\n print(\"Expected: \", correct,\" recieved: \", prediction)\n return False\n return True", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def test_predict(self):\n \n\n model ,vec, x_testing=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"count\")\n \n model2 ,vec_tfidf, x_testing2=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"tfidf\")\n \n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with countVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())\n\n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with tfidfVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())", "def test(self, data_set):\r\n\r\n correct = 0.0\r\n total = 0.0\r\n\r\n for input, target in data_set:\r\n #actual output from neural net\r\n output = self.predict(input)\r\n total += 1.0 #number of total output vectors\r\n\r\n if allclose(output, target, self.converge) == True:\r\n correct += 1.0\r\n\r\n return correct/total", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy", "def _evaluate(self, y_true, y_pred):\n pass", "def compare_predictions():\n validation_labels = np.array(pd.read_csv(val_true_labels_dir + dataset_version + 'validation_labels.csv', index_col=0))\n validation_labels = np.reshape(validation_labels, (-1))\n\n diff_between_files = []\n also1s = []\n also2s = []\n for filename1 in os.listdir(val_predictions_dir):\n if filename1.endswith(\".csv\"):\n for filename2 in os.listdir(val_predictions_dir):\n if filename2.endswith(\".csv\"):\n if filename1 < filename2:\n wrong1 = 0\n wrong2 = 0\n diff_between = 0\n also1 = 0\n also2 = 0\n diff_corr1 = 0\n diff_corr2 = 0\n f1 = np.array(pd.read_csv(val_predictions_dir + filename1, index_col=0))\n f1 = np.reshape(f1, (-1))\n f2 = np.array(pd.read_csv(val_predictions_dir + filename2, index_col=0))\n f2 = np.reshape(f2, (-1))\n for line in range(f1.shape[0]):\n if f1[line] != validation_labels[line]:\n wrong1 += 1\n if f2[line] != validation_labels[line]:\n wrong2 += 1\n if f1[line] != f2[line]:\n diff_between += 1\n if f1[line] == validation_labels[line]:\n diff_corr1 += 1\n if f2[line] == validation_labels[line]:\n diff_corr2 += 1\n if f1[line] != validation_labels[line]:\n if f2[line] != validation_labels[line]:\n also2 += 1\n if f2[line] != validation_labels[line]:\n if f1[line] != validation_labels[line]:\n also1 += 1\n\n diff_between_files.append(diff_between)\n print(filename1)\n print('Wrongly predicted by 1: ' + str(100 * wrong1 / f1.shape[0]) + '%')\n print(filename2)\n print('Wrongly predicted by 2: ' + str(100 * wrong2 / f1.shape[0]) + '%')\n print()\n print('Differences between files: ' + str(100 * diff_between / f1.shape[0]) + '%')\n print(f'\\t of which correct by 1 {100 * diff_corr1 / diff_between}%, by 2 {100 * diff_corr2 / diff_between}%')\n also1s.append(also1 / wrong2)\n also2s.append(also2 / wrong1)\n print('Wrongly predicted by other among wrong ones: ' + str(100 * also2 / wrong1) + '%, ' + str(\n 100 * also1 / wrong2) + '%\\n\\n\\n')\n\n print('Max, min and avg differences between files:')\n print(str(100 * max(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * min(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * np.mean(diff_between_files) / validation_labels.shape[0]) + '%')\n\n print('\\nWrongly predicted by first that were also wrongly predicted by second:')\n print('Max: ' + str(100 * max(also2s)) + '%')\n print('Min: ' + str(100 * min(also2s)) + '%')\n print('Avg: ' + str(100 * np.mean(also2s)) + '%')\n\n print('\\nWrongly predicted by second that were also wrongly predicted by first:')\n print('Max: ' + str(100 * max(also1s)) + '%')\n print('Min: ' + str(100 * min(also1s)) + '%')\n print('Avg: ' + str(100 * np.mean(also1s)) + '%')", "def test_using_predict(self):\n [X, labels, Y] = self.gen_data()\n # Call algorithm\n bias = multiLogReg(self.sds.from_numpy(\n X), self.sds.from_numpy(Y), verbose=False).compute()\n\n [m, y_pred, acc] = multiLogRegPredict(self.sds.from_numpy(\n X), self.sds.from_numpy(bias), self.sds.from_numpy(Y), verbose=False).compute()\n\n self.assertTrue(acc > 98)", "def test_response_value(predict, y):\r\n print(\"test_response_value()...\", end = \"\")\r\n if len(set(y)) == 1:\r\n assert (predict == y).all()\r\n print(\"Passed!\")", "def compare(sampl_predict, sampl_real):\n difference = 0\n for i in range(len(sampl_predict)):\n if sampl_predict[i] != sampl_real[i]:\n difference += 1\n\n return difference", "def calculate_metrics(self, predictions, actual):\n\n predictions.dtype = np.bool\n actual.dtype = np.bool\n\n N = len(predictions) * len(predictions[0])\n\n TP = np.sum(np.bitwise_and(predictions, actual))\n FP = np.sum(np.bitwise_and(np.invert(predictions), np.invert(actual) ))\n FN = np.sum(np.bitwise_and(predictions, np.invert(actual)))\n TN = np.sum(np.bitwise_and(np.invert(predictions), (actual)))\n\n correct = np.sum(predictions == actual) / N\n accuracy = (TP + TN) / N\n precision = TP / (TP + FP) # positive predictive value\n sensitivity = TP / (TP + FN) # true positive rate\n specificity = TN / (TN + FP) # true negative rate\n\n return correct, accuracy, precision, sensitivity, specificity", "def get_correct(self, predicted, actual):\n ret_ratios = [np.sum(predicted == actual) / len(actual)]\n for i in range(2):\n actual_ones = np.where(actual == i)[0]\n should_be_ones = np.take(predicted, actual_ones)\n actual_ones = np.take(actual, actual_ones)\n ret_ratios.append(np.sum(should_be_ones == actual_ones) / len(actual_ones))\n\n return tuple(ret_ratios)", "def score(self, y_true, y_pred):\r\n pass", "def evaluate(self, true_values, predicted_values):\n if self.classification_type == \"classification\":\n cross_entropy = self.cross_entropy(true_values, predicted_values)\n #self.percent_accuracy(true_values,predicted_values)\n return cross_entropy\n elif self.classification_type == \"regression\":\n MSE = self.mean_squared_error(true_values, predicted_values)\n self.mean_absolute_error(true_values, predicted_values)\n\n return MSE", "def evaluate(predicted, actual):\r\n assert(len(predicted) == len(actual))\r\n total = len(actual)\r\n correct = len([x for x in range(total) if predicted[x] == actual[x]])\r\n return (float(correct) / float(total)) * 100.0", "def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity", "def validate(self, validation_data):\n counter = 0\n for idx, x in enumerate(validation_data[0]):\n predicted = self.predict(x)\n #print(\"actual = \", validation_data[1][idx], \" -> predicted = \", predicted)\n if self.predict(x) == validation_data[1][idx]:\n counter += 1\n\n return counter/len(validation_data[1])", "def assert_predictions_equal(first, second, x):\n preds1 = first.predict(x, batch_size=batch_size)\n preds2 = second.predict(x, batch_size=batch_size)\n np.testing.assert_array_equal(preds1, preds2)", "def one_zero_loss(self, test_set, predicted_values):\r\n\r\n incorrect=0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification != predicted_values[i]:\r\n incorrect += 1\r\n self.performance += incorrect / len(test_set)\r\n self.num_performances += 1\r\n return incorrect / len(test_set)", "def _check_value(self, y_pred, y):\n if self._type != 'classification' and not (np.equal(y_pred ** 2, y_pred).all() and np.equal(y ** 2, y).all()):\n raise ValueError('For multilabel case, input value must be 1 or 0.')", "def test_model_outcome(predicted, actual, planned):\n if not isinstance(predicted, pd.DataFrame):\n predicted = pd.DataFrame(predicted, columns=[\"PREDICTED_TRIP_DURATION\"])\n if not isinstance(actual, pd.DataFrame):\n actual = pd.DataFrame(actual, columns=[\"ACTUAL_TRIP_DURATION\"])\n if not isinstance(planned, pd.DataFrame):\n planned = pd.DataFrame(planned, columns=[\"PLANNED_TRIP_DURATION\"])\n # Initialise the combined dataframe\n combined = pd.concat([predicted, actual, planned], axis=1)\n # Calculate the actual delay\n actual_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"ACTUAL_TRIP_DURATION\"]\n # Calculate the predicted delay\n predicted_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"PREDICTED_TRIP_DURATION\"]\n # Calculate the difference in delay\n delay_diff = actual_delay - predicted_delay\n # Combine the delays into a single dataframe\n combined_delay = pd.concat([pd.DataFrame(actual_delay, columns=['Actual_Delay']),\n pd.DataFrame(predicted_delay, columns=['Predicted_Delay']),\n pd.DataFrame(delay_diff, columns=['Difference_In_Delay'])], axis=1)\n # Obtain the index of the max and min values of the actual, predicted and difference delays\n actual_max_index = combined_delay[\"Actual_Delay\"].argmax()\n actual_min_index = combined_delay[\"Actual_Delay\"].argmin()\n predicted_max_index = combined_delay[\"Predicted_Delay\"].argmax()\n predicted_min_index = combined_delay[\"Predicted_Delay\"].argmin()\n delay_diff_max_index = combined_delay[\"Difference_In_Delay\"].argmax()\n delay_diff_min_index = combined_delay[\"Difference_In_Delay\"].argmin()\n # Get the Mean Absolute Error\n MAE = metrics.mean_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the R2 Score\n R2 = metrics.r2_score(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Root Mean Squared Error\n RMSE = metrics.mean_squared_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"],\n squared=False)\n # Get the Median Absolute Error\n MEDAE = metrics.median_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Mean Squared Error Log Value\n MSLE = metrics.mean_squared_log_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Build Dictionary\n pass_val = {\"combined\": combined,\n \"combined_delay\": combined_delay,\n \"actual_max_index\": actual_max_index,\n \"actual_min_index\": actual_min_index,\n \"predicted_max_index\": predicted_max_index,\n \"predicted_min_index\": predicted_min_index,\n \"delay_diff_max_index\": delay_diff_max_index,\n \"delay_diff_min_index\": delay_diff_min_index,\n \"MAE\": MAE,\n \"R2\": R2,\n \"MEDAE\": MEDAE,\n \"RMSE\": RMSE,\n \"MSLE\": MSLE}\n # Return Dictionary\n return pass_val", "def percent_accuracy(self, true_values, predicted_values):\n\n correct = 0\n size = len(true_values)\n for i in range(len(true_values)):\n true_labels = true_values[i]\n predicted_labels = predicted_values[i]\n predicted_index = np.argmax(predicted_labels)\n\n if true_labels[predicted_index] == 1:\n correct += 1", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def evaluate(labels, predictions):\n #labels and predictions\n truePos = 0\n trueNeg = 0\n for data in range(len(labels)):\n if((predictions[data] == 1) and (predictions[data] == labels[data])):\n truePos+=1\n elif((predictions[data] == 0) and (predictions[data] == labels[data])):\n trueNeg+=1\n sensitivity = truePos/(len(labels) + 1)\n specificity = trueNeg/(len(labels) + 1)\n return (sensitivity, specificity)\n \n\n #raise NotImplementedError", "def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity", "def test_score(self):\n pred_copy_simple = np.copy(self.regression_single.y_pred)\n pred_copy_boston = np.copy(self.regression_boston.y_pred)\n\n self.assertEqual(pred_copy_simple.shape, self.y_test.shape)\n self.assertEqual(pred_copy_boston.shape, self.boston_y_test.shape)", "def compare_rmse(x_true, x_pred):\n x_true, x_pred = x_true.astype(np.float32), x_pred.astype(np.float32)\n return np.linalg.norm(x_true - x_pred) / (np.sqrt(x_true.shape[0] * x_true.shape[1] * x_true.shape[2]))", "def make_predictions(model, test_set, val_set):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## We use the length of these two arrays when we sift through the data to find\n ## the right predictions and wrong predictions\n images = len(test_set)\n\n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Begins loop to find total correct predictions\n for i in range(images):\n if predict[i] == np.argmax(val_set[i]):\n correctly_guessed += 1\n\n ## Returns amount of predictions were correct\n print('\\nCorrectly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (images - correctly_guessed))" ]
[ "0.6819916", "0.6686152", "0.6664491", "0.66569054", "0.658159", "0.65720046", "0.65405303", "0.65399057", "0.6520549", "0.6491463", "0.64827454", "0.6441679", "0.64338255", "0.6414647", "0.6393218", "0.6378676", "0.6371993", "0.6312835", "0.6306903", "0.6305888", "0.62949526", "0.6285498", "0.6276431", "0.6252538", "0.62484217", "0.62446773", "0.62217104", "0.62016785", "0.6188925", "0.6174405" ]
0.6692643
1
Initialize the SGC MC objects
def _init_sgc(self, init_temp, symbols): self._sgc_obj = [] for ground_state in self._ground_states: self._sgc_obj.append( SGCMonteCarlo( ground_state["atoms"], init_temp, symbols=symbols))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\r\n\r\n self.Helpers = Helpers(\"Movidius\")\r\n self.confs = self.Helpers.confs\r\n\r\n self.classes = []\r\n self.ncsGraph = None\r\n self.ncsDevice = None\r\n self.reqsize = None\r\n\r\n self.mean = 128\r\n self.std = 1 / 128\r\n\r\n #mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)\r\n\r\n self.Helpers.logger.info(\"Movidius class initialization complete.\")", "def setup_class(cls):\n cls.rounding_precision = 8\n cwd = os.getcwd()\n if(cwd.split(os.sep)[-1]==\"pyCGM_Single\"):\n parent = os.path.dirname(cwd)\n os.chdir(parent)\n cls.cwd = os.getcwd()\n\n #Load data from SampleData/ROM/ for testing\n dynamic_trial,static_trial,vsk_file,_,_ = pyCGM_Helpers.getfilenames(x=2)\n cls.motion_data = pycgmIO.loadData(os.path.join(cls.cwd, dynamic_trial))\n cls.static_data = pycgmIO.loadData(os.path.join(cls.cwd, static_trial))\n cls.vsk_data = pycgmIO.loadVSK(os.path.join(cls.cwd, vsk_file), dict=False)\n cls.cal_SM = pycgmStatic.getStatic(cls.static_data,cls.vsk_data,flat_foot=False)", "def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()", "def memb_init(self):\n self.initialize()", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def init():", "def initialize(self):\n self.gc1.reset_parameters()\n self.gc2.reset_parameters()\n\n for s in self.scores:\n stdv = 1. / math.sqrt(s.size(1))\n s.data.uniform_(-stdv, stdv)\n for b in self.bias:\n # fill in b with postive value to make\n # score s closer to 1 at the beginning\n b.data.fill_(self.bias_init)\n\n for Dk in self.D_k:\n stdv = 1. / math.sqrt(Dk.size(1))\n Dk.data.uniform_(-stdv, stdv)\n\n for b in self.D_bias:\n b.data.fill_(0)", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def initialize(self):\n # FIX: INITIALIZE PROCESS INPUTS??\n for mech, value in self.initial_values.items():\n mech.initialize(value)", "def __init__ (self) :\n self.loadCSPAD2x2CalibParsDefault()", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def __init__(self):\n \n self.mineLatLong = np.array([0.0,0.0]) \n self.theOreBody = OreBodyDataManager()\n self.theMiningSystem = MiningSystemDataManager()\n self.theProcessingSystem = ProcessingSystemDataManager()\n self.theEconomicDataManager = EconomicDataManager()\n self.theInfrastructureManager = InfrastructureDataManager()", "def __init__(self):\n\n super(aero_csm_component,self).__init__()", "def __init__(self):\n # Call parent initialisers\n # SecmUtilityCore.__init__(self)\n Node.__init__(self, \"vehicle_sim\")\n # super().__init__('vehicle_sim')\n\n self.vehicle_marker_array = MarkerArray()\n self.vehicle_marker = Marker()\n self.pose_msg = Pose()\n self.control_msg = Control()\n\n self.model = Model()\n\n # Create subscribers to listen to SECM output\n self.create_subscription(\n msg_type=Control,\n topic=\"/control\",\n callback=self.receive_control_msg,\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create pose publisher\n self.pose_publisher = self.create_publisher(\n msg_type=Pose,\n topic=\"/pose\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create marker publisher\n self.vehicle_marker_publisher = self.create_publisher(\n msg_type=Marker,\n topic=\"/vehicle_marker\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Setup timers to spin the execution loop. \n self.create_timer(1.0/30.0, self.execute)", "def do_init(self):\n\n pass", "def initialize(self):\n\t\tpass", "def init_cg(self):\n self.add_pc(2)\n self.pb[self.pc - 2] = \"ASSIGN\", _m(CodeGenerator.REGISTER_SIZE + CodeGenerator.INIT_MEMORY_VALUE, \"#\"), _m(\n self.top_sp)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.top_sp), _m(self.top_sp, \"@\")\n self.init_global_func()\n self.make_output()", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n self.send_init_command()", "def init(self) -> None:\n ..." ]
[ "0.67107725", "0.6539785", "0.6507728", "0.64441645", "0.64169127", "0.64014566", "0.6322285", "0.6314256", "0.6253781", "0.62345904", "0.62134373", "0.62134373", "0.62134373", "0.62134373", "0.62134373", "0.62134373", "0.62134373", "0.62134373", "0.6211918", "0.62051463", "0.6190481", "0.6166541", "0.6146446", "0.61344326", "0.61318374", "0.6130919", "0.6130919", "0.6130919", "0.6105314", "0.6101238" ]
0.68712074
0
Check that the ground_state arguments contain the correct fields
def check_gs_argument(ground_state): required_fields = ["bc", "cf", "eci", "atoms"] keys = ground_state.keys() for key in keys: if key not in required_fields: raise ValueError( "The GS argument has to contain {} keys. Given {}".format( required_fields, keys))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _verify_command_states(cls, kwargs):\n return kwargs", "def check_state(self):\n pass", "def _check_params(self):\n pass", "def validate_ground_input(ground: tuple) -> None:\n\n if not isinstance(ground, tuple):\n raise InvalidGroundValueError(\n f\"Object must be a tuple\"\n f\"with format (1, 2), not {ground}\"\n )", "def test_get_field_state_comparisons_no_comp_states(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n [])", "def _verify(\n hass,\n expected_state,\n expected_percentage,\n expected_oscillating,\n expected_direction,\n expected_preset_mode,\n):\n state = hass.states.get(_TEST_FAN)\n attributes = state.attributes\n assert state.state == str(expected_state)\n assert attributes.get(ATTR_PERCENTAGE) == expected_percentage\n assert attributes.get(ATTR_OSCILLATING) == expected_oscillating\n assert attributes.get(ATTR_DIRECTION) == expected_direction\n assert attributes.get(ATTR_PRESET_MODE) == expected_preset_mode", "def _validateInputs(self):\n if self.args[\"Counties\"] == [] and self.args[\"BBox\"] == None:\n raise Exception(\"Invalid arguments provided. Must provide either a geographical bounding box or a list of counties.\")\n\n if self.args[\"StartDateTime\"] > self.args[\"EndDateTime\"]:\n raise Exception(\"Invalid arguments provided. StartDateTime cannot be after EndDateTime\")", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def _check_args(self, args_):\n\n pass", "def test_get_field_state_comparisons_bad_comp_state(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n ['T0', 'Fast'])\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n ['Fast', 'T0'])", "def _check(self):\n if not isinstance(self.fc_layers, tuple):\n raise TypeError(f'fc_layers require tuple, get {type(self.fc_layers)}')\n if not isinstance(self.use_dropout, tuple):\n raise TypeError(f'use_dropout require tuple, get {type(self.use_dropout)}')\n if not isinstance(self.drop_prob, tuple):\n raise TypeError(f'drop_prob require tuple, get {type(self.drop_prob)}')\n if not isinstance(self.use_activation, tuple):\n raise TypeError(f'use_activation require tuple, get {type(self.use_activation)}')\n l_fc_layer = len(self.fc_layers)\n l_use_drop = len(self.use_dropout)\n l_drop_prob = len(self.drop_prob)\n l_use_activation = len(self.use_activation)\n pass_check = l_fc_layer >= 2 and l_use_drop < l_fc_layer and l_drop_prob < l_fc_layer and l_use_activation < l_fc_layer and l_drop_prob == l_use_drop\n if not pass_check:\n msg = 'Wrong BaseDiscriminator parameters!'\n raise ValueError(msg)", "def validate_ground_data_input(cls, obj: str) -> (tuple, tuple):\n\n ground = cls._get_ground_data(obj)\n points = cls._get_coordinates_data(obj)\n\n ground, points = ProcessObjectDatatype.process_ground_data_to_tuples(\n ground, points\n )\n\n cls.validate_coordinates_input(points)\n cls.validate_ground_input(ground)\n cls.validate_ground_size(ground, points)\n\n return ground, points", "def goal_test(self, state):\n \"*** YOUR CODE HERE ***\"\n if (state[0], state[1]) in self.goals: #Check to see if at goal state\n return True\n else:\n return False", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n 
print('Bad Parameter: N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def is_valid(self,):\r\n return self.g > 0 and self.l > 0 and self.m1 > 0 and self.m2 > 0 and self.m3 > 0 and self.r1 > 0 and self.r2 > 0 and self.tau > 0 and self.theta1 > 0 and self.theta2 > 0 and self.theta3 > 0", "def test_too_many_props(self):\n with pytest.raises(ValueError):\n State(\n substance=\"water\",\n T=Q_(300, \"K\"),\n p=Q_(101325, \"Pa\"),\n u=Q_(100, \"kJ/kg\"),\n )", "def _assert_state(self, state: Union[TrainingState_, List[TrainingState_]]) -> None:\n # Since assert can be turned off and this error checking\n # is really important, we use explicit error checking\n # and raise a ValueError if needed.\n if isinstance(state, TrainingState_):\n state = [state]\n if self.training_state not in state:\n msg = (\n f\"expected to be in states {state} but current state \"\n f\"is {self.training_state}\"\n )\n # In case we are failing in the context of autograd hook, asserting\n # may not generate useful msg. So, let's print it to be sure.\n if self.rank == 0:\n print(f\"Asserting FSDP instance is: {self}\")\n print(f\"ERROR: {msg}\")\n traceback.print_stack()\n raise ValueError(msg)", "def valid_state(given_state):\n if given_state not in [\"ON\", \"OFF\"]:\n raise argparse.ArgumentTypeError(\"Invalid state given: \" + given_state)\n return given_state", "def test_State_none_kwargs(self):\n with self.assertRaises(TypeError):\n State(id=None, created_at=None, updated_at=None)", "def grok_state(self, obj):\n if 'state' in obj:\n my_state = obj['state'].lower()\n if my_state != 'absent' and my_state != 'present':\n raise aomi \\\n .exceptions \\\n .Validation('state must be either \"absent\" or \"present\"')\n\n self.present = obj.get('state', 'present').lower() == 'present'", "def arg_check(self):\n # If the user didn't input a value for the start frame, start at frame 1.\n if not self.start_frm_le.text():\n self.start_frm = '1'\n self.start_frm_le.setText('1')\n\n # If the user didn't input a value for the end frame, end at frame 24\n if not self.end_frm_le.text():\n self.end_frm = '24'\n self.end_frm_le.setText('24')\n \n # If the user set the start or end time to something other than a digit.\n sf = str(self.start_frm)\n ef = str(self.end_frm)\n\n if not sf.isdigit() or not ef.isdigit():\n print \"The start and end frames must be whole numbers.\"\n return None\n\n # If wireframe checkbox is checked, toggle wireframe.\n if self.ren_cb.isChecked():\n self.wireframe = True\n elif not self.ren_cb.isChecked():\n self.wireframe = False\n\n return True", "def test_empty_arguments(self):\n arg1 = {'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg1)\n\n arg2 = {'src': '.',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg2)\n\n arg3 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg3)\n\n arg4 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg4)", "def _validate_branch_args(self) -> None:\n lk = 
set(self.branch_losses.keys())\n dk = set(self.model._get_inner_keys(self.model.heads))\n has_same_keys = lk == dk\n\n mk = None\n if self.branch_metrics is not None:\n mk = set(self.branch_metrics.keys())\n has_same_keys = dk == lk == mk\n\n ek = None\n if self.branch_loss_params is not None:\n ek = set(self.branch_loss_params.keys())\n has_same_keys = dk == lk == mk == ek\n\n if not has_same_keys:\n raise ValueError(\n \"Got mismatching keys for branch dict args. \"\n f\"Branch losses: {lk}. \"\n f\"Branch loss params: {ek}. \"\n f\"Decoder branches: {dk}. \"\n f\"Metrics: {mk}. \"\n f\"(`metrics`, and `branch_loss_params` can be None)\"\n )", "def _is_valid(self):\n # Test vol_id:\n assert isinstance(self.volume_id, int), ('The volume id vol_id must be an '\n f'integer, but {self.volume_id} was '\n 'given.')\n assert self.volume_id > 0, ('The volume id vol_id must be greater zero, '\n f'but {self.volume_id} was given.')\n\n # Test if ROI function is defined properly:\n assert callable(self.roi), ('roi must be a callable function '\n 'which depends on x,y,z.')\n\n # Testing the electric field:\n if not (callable(self.electric_field) or\n isinstance(self.electric_field, (int, float))):\n raise ValueError('e_field must be either a function or '\n 'a constant!')\n\n if callable(self.electric_field):\n args = inspect.getfullargspec(self.electric_field).args\n m = np.all(np.isin(['x', 'y', 'z'], args))\n m = m & (len(args) == 3)\n assert m, ('Wrong arguments for e_field. Expected arguments: '\n f'\"x\", \"y\" and \"z\" but {args} were given.')\n # Cannot add a specific if **kwargs are valid properties. Cannot\n # inspect nestpy functions.", "def _verify_arguments(self, kwargs: dict[str, Any]):\n geom_stat_args = kwargs.keys() | self._stat._kwargs.keys()\n unknown = (\n geom_stat_args\n - self.aesthetics()\n - self.DEFAULT_PARAMS.keys() # geom aesthetics\n - self._stat.aesthetics() # geom parameters\n - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics\n - { # stat parameters\n \"data\",\n \"mapping\",\n \"show_legend\", # layer parameters\n \"inherit_aes\",\n \"raster\",\n }\n ) # layer parameters\n if unknown:\n msg = (\n \"Parameters {}, are not understood by \"\n \"either the geom, stat or layer.\"\n )\n raise PlotnineError(msg.format(unknown))", "def check_params(self):\n raise NotImplementedError", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def _getIsValidParameters(self):\n return True, ''", "def test_addr_state_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_state(input_val)\n self.assertEqual(output_val, self.line.addr_state)", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1" ]
[ "0.65441775", "0.6414534", "0.61819196", "0.6114851", "0.6088804", "0.5971366", "0.59573877", "0.5891864", "0.5886692", "0.5885214", "0.58631516", "0.580721", "0.5800338", "0.5740057", "0.57381886", "0.57213455", "0.57193244", "0.5703734", "0.5701963", "0.56963843", "0.5695786", "0.5688249", "0.5663592", "0.56631726", "0.5653675", "0.5643725", "0.5633032", "0.5625862", "0.562345", "0.56126744" ]
0.77645344
0
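
A minimal usage sketch for the check_gs_argument passage above; the dict values below are placeholder assumptions, not taken from the dataset:

# Every key is among the required fields, so this passes silently.
check_gs_argument({"bc": None, "cf": {}, "eci": {}, "atoms": None})

# An unexpected key raises ValueError.
try:
    check_gs_argument({"bc": None, "extra": 1})
except ValueError as err:
    print(err)

Note that the check only rejects unexpected keys; a dict that is merely missing one of the required fields passes unnoticed.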
Returns the singlet name as stored in the thermodictionary
def get_singlet_name(orig_name):
    return "singlet_{}".format(orig_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self) -> str:\n return f\"{self._inst} {self._sid_data['sid']} {self._data[self._sid_data['sid_name']]}\"", "def species_name(self):\n return self.get(self._names[\"species_name\"])", "def get_name():", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.6928742", "0.67757905", "0.6712813", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947", "0.66612947" ]
0.7363678
0
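
For reference, a quick check of the get_singlet_name passage above; the argument is an arbitrary example name:

>>> get_singlet_name("c1_0")
'singlet_c1_0'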
Delete a previously created rolemenu
async def rolemenu_delete(self, interaction: discord.Interaction, name: str):
    doc = await self.db.find_one({
        "guild_id": interaction.guild.id,
        "name": name
    })
    if not doc:
        return await interaction.response.send_message(
            "Role menu with that name does not exist.", ephemeral=True)
    await interaction.response.defer(ephemeral=True)
    await self.db.delete_one({"_id": doc["_id"]})
    await interaction.followup.send("Role menu removed.", ephemeral=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_menu():", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "def remove_menu(menu_name):\n\n pass", "async def rolemenu_remove_role(self, interaction: discord.Interaction,\n name: str, role: str):\n try:\n role_id = int(role)\n except ValueError:\n return await interaction.response.send_message(\n \"The role provided \"\n \"is not valid. Make sure that you either select one from the \"\n \"options that the autocomplete provides, or that you \"\n \"provide the role's ID\",\n ephemeral=True)\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"No role menu with that name exists.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n for role_doc in doc[\"roles\"]:\n if role_doc[\"id\"] == role_id:\n break\n else:\n return await interaction.followup.send(\n \"Role not found in that menu\")\n await self.db.update_one({\"_id\": doc[\"_id\"]},\n {\"$pull\": {\n \"roles\": role_doc\n }})\n doc = await self.db.find_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role removed from the menu.\")\n menu = Menu(self, interaction.guild, doc)\n await menu.update()", "def delete_role(id):\r\n check_admin()\r\n\r\n role = Role.query.get_or_404(id)\r\n db.session.delete(role)\r\n db.session.commit()\r\n flash('You have successfully deleted the role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_roles'))\r\n\r\n return render_template(title=\"Delete Role\")", "def main_role_delete(\n client: CitusCloudMgmt,\n **opts: tp.Any\n) -> None:\n\n id_ = opts[\"id\"]\n client.delete_role(opts[\"formation\"], id_)\n logger.info(f\"Deleted role with id=\\\"{id_}\\\"\")", "def _delete_roles(self):\n for role in self.roles:\n role.delete()", "def test_delete_role(self):\n pass", "def delete_role(id):\n\tcheck_admin()\n\trole = Role.query.get_or_404(id)\n\tdb.session.delete(role)\n\tdb.session.commit()\n\tflash(\"You have successfully deleted the role from the database\")\n\n\t#redirect to the roles page\n\treturn redirect(url_for('admin.list_roles'))\n\n\treturn render_template(title = \"Delete Role\")", "def delete_role(role):\n fallback = Role.load_cli_user()\n\n def _del(cls, col):\n pq = db.session.query(cls)\n pq = pq.filter(col == role.id)\n\n def _repo(cls, col):\n pq = db.session.query(cls).filter(col == role.id)\n pq.update({col: fallback.id}, synchronize_session=False)\n\n _del(Permission, Permission.role_id)\n db.session.delete(role)\n db.session.commit()", "def remove_menu_item(menu_item_name, parent_menu):\n\n pass", "def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))", "async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, 
args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "def test_ipam_roles_delete(self):\n pass", "def delete(self, role_id):\n self.client.delete_role(role_id)", "async def on_guild_role_delete(self, role):\n channel = self.client.get_channel(serverlogs.getChannel(role.guild.id, \"roles\"))\n if channel is not None:\n await self.log_role(role=role, type='Delete', channel=channel, guild=role.guild)", "def delete(self, app, role, privilege):\n \n # check user's privileges\n h.checkAccess('delete')\n\n model = RolesModel()\n model.deletePrivilege( app, role, privilege )\n\n # returns empty reply", "def restaurantMenuItemDelete(restaurant_id, menu_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n menuItem = session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method == 'POST':\n session.delete(menuItem)\n session.commit()\n\n flash('Menu Item Successfully Deleted', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemDelete.html', menuItem=menuItem, restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "async def deleteReactionMenu(menuID: int):\n menu = botState.reactionMenusDB[menuID]\n try:\n await menu.msg.delete()\n except NotFound:\n pass\n if menu.msg.id in botState.reactionMenusDB:\n del botState.reactionMenusDB[menu.msg.id]", "def remove_trainee(role_id):\n\n role = Role.query.get(role_id)\n if role is None or role.role_id != RoleIds.Trainee:\n flash(\"Role invalide\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n if role.activity_type not in current_user.get_supervised_activities():\n flash(\"Non autorisé\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n db.session.delete(role)\n db.session.commit()\n\n return redirect(url_for(\".leader_list\"))", "def delete_token_role(self, role):\n return self.delete('auth/token/roles/{0}'.format(role))", "def delete_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_delete_namespaced_role(self):\n pass", "def deleteRole(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_00_menu_deletion(self):\r\n cr, uid, Menus = self.cr, self.uid, self.Menus\r\n\r\n # Generic trick necessary for search() calls to avoid hidden menus \r\n ctx = {'ir.ui.menu.full_list': True}\r\n\r\n root_id = Menus.create(cr, uid, {'name': 'Test root'})\r\n child1_id = Menus.create(cr, uid, {'name': 'Test child 1', 'parent_id': root_id})\r\n child2_id = Menus.create(cr, uid, {'name': 'Test child 2', 'parent_id': root_id})\r\n child21_id = Menus.create(cr, uid, {'name': 'Test child 2-1', 'parent_id': child2_id})\r\n\r\n all_ids = [root_id, child1_id, child2_id, child21_id]\r\n\r\n # delete and check that direct children are promoted to top-level\r\n # cfr. 
explanation in menu.unlink()\r\n Menus.unlink(cr, uid, [root_id])\r\n\r\n remaining_ids = Menus.search(cr, uid, [('id', 'in', all_ids)], order=\"id\", context=ctx)\r\n self.assertEqual([child1_id, child2_id, child21_id], remaining_ids)\r\n\r\n orphan_ids = Menus.search(cr, uid, [('id', 'in', all_ids), ('parent_id', '=', False)], order=\"id\", context=ctx)\r\n self.assertEqual([child1_id, child2_id], orphan_ids)", "async def deleterole(self, ctx: context.CustomContext, *, role: str):\n\n try:\n selfrole = await Fuzzy[Selfrole].convert(ctx, role)\n except exceptions.NotFoundError:\n return await ctx.send(\n f\"{config.NO} This server has no selfrole that matches `{role}`.\"\n )\n\n if selfrole.role:\n hard_delete = await ctx.confirm(\n f\"{config.USER_INTERACTION_REQUIRED} Should I also delete the \"\n f\"Discord role `{selfrole.role.name}`, instead of just removing the \"\n f\"selfrole from the list of selfroles in `{config.BOT_PREFIX}roles`?\"\n )\n else:\n hard_delete = False\n\n await self.bot.db.execute(\n \"DELETE FROM selfrole WHERE guild_id = $1 AND role_id = $2\",\n ctx.guild.id,\n selfrole.role.id,\n )\n\n if hard_delete:\n try:\n await selfrole.role.delete()\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.DELETE_ROLE, detail=selfrole.role.name\n )\n\n return await ctx.send(\n f\"{config.YES} The `{role}` selfrole and its Discord role were deleted.\"\n )\n\n await ctx.send(\n f\"{config.YES} The `{role}` selfrole was removed from the `{config.BOT_PREFIX}roles` list but \"\n f\"I did not delete its Discord role.\"\n )", "def role_delete(\n login_manager: LoginManager, *, role_id: str, endpoint_id: uuid.UUID\n) -> None:\n transfer_client = login_manager.get_transfer_client()\n res = transfer_client.delete_endpoint_role(endpoint_id, role_id)\n display(res, text_mode=TextMode.text_raw, response_key=\"message\")", "async def deleteRole(self, ctx, reason=\"No reason available\"):\n for role in ctx.guild.roles:\n if role.name == self.categoryName:\n try:\n await role.delete(reason=reason)\n except discord.errors.Forbidden:\n self.msgToDelete.append(await ctx.message.channel.send(\n \"Erreur, permission non accordée, la suppression des rôles n'est pas complète.\"))\n print(\"Deleted all roles.\")", "def delete_role(role_id):\n\tsession = get_session()\n\tsession.delete(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id))" ]
[ "0.7695256", "0.71618533", "0.7062838", "0.69895333", "0.6775688", "0.675535", "0.6746418", "0.67404985", "0.67155915", "0.6577805", "0.652319", "0.64278144", "0.64047396", "0.6371427", "0.63324994", "0.63156474", "0.63074523", "0.63068837", "0.6301148", "0.6283026", "0.62599564", "0.6245142", "0.6216083", "0.6208842", "0.6150055", "0.61008584", "0.6094291", "0.6087382", "0.6043856", "0.6022272" ]
0.78972113
0
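
The positive passage above needs a live Discord interaction and a Mongo-style collection, so it is not runnable in isolation. Below is a minimal sketch of its lookup-then-delete-by-_id pattern with an in-memory stand-in for the collection; every name in the sketch is an assumption for illustration:

import asyncio

class FakeCollection:
    """In-memory stand-in for the async Mongo collection used by the cog."""

    def __init__(self, docs):
        self.docs = docs

    async def find_one(self, query):
        # Return the first document matching every key/value pair in the query.
        for doc in self.docs:
            if all(doc.get(k) == v for k, v in query.items()):
                return doc
        return None

    async def delete_one(self, query):
        doc = await self.find_one(query)
        if doc is not None:
            self.docs.remove(doc)

async def demo():
    db = FakeCollection([{"_id": 1, "guild_id": 42, "name": "colors"}])
    doc = await db.find_one({"guild_id": 42, "name": "colors"})
    if doc:  # delete only after the menu document is confirmed to exist
        await db.delete_one({"_id": doc["_id"]})
    print(db.docs)  # [] -- the role menu document is gone

asyncio.run(demo())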
Remove a role from a menu
async def rolemenu_remove_role(self, interaction: discord.Interaction,
                               name: str, role: str):
    try:
        role_id = int(role)
    except ValueError:
        return await interaction.response.send_message(
            "The role provided "
            "is not valid. Make sure that you either select one from the "
            "options that the autocomplete provides, or that you "
            "provide the role's ID",
            ephemeral=True)
    doc = await self.db.find_one({
        "guild_id": interaction.guild.id,
        "name": name
    })
    if not doc:
        return await interaction.response.send_message(
            "No role menu with that name exists.", ephemeral=True)
    await interaction.response.defer(ephemeral=True)
    for role_doc in doc["roles"]:
        if role_doc["id"] == role_id:
            break
    else:
        return await interaction.followup.send(
            "Role not found in that menu")
    await self.db.update_one({"_id": doc["_id"]},
                             {"$pull": {
                                 "roles": role_doc
                             }})
    doc = await self.db.find_one({"_id": doc["_id"]})
    await interaction.followup.send("Role removed from the menu.")
    menu = Menu(self, interaction.guild, doc)
    await menu.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_menu(menu_name):\n\n pass", "async def rolemenu_delete(self, interaction: discord.Interaction,\n name: str):\n doc = await self.db.find_one({\n \"guild_id\": interaction.guild.id,\n \"name\": name\n })\n if not doc:\n return await interaction.response.send_message(\n \"Role menu with that name does not exist.\", ephemeral=True)\n await interaction.response.defer(ephemeral=True)\n await self.db.delete_one({\"_id\": doc[\"_id\"]})\n await interaction.followup.send(\"Role menu removed.\", ephemeral=True)", "def remove_role(self, role):\n if role.name in [r.name for r in self.roles]:\n remaining_if_any_roles = [r.to_python() for r in self.roles if not r.name == role.name]\n if remaining_if_any_roles:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$set': {'roles': remaining_if_any_roles}})\n else:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$unset': {'roles': 1}})", "def remove_menu_item(menu_item_name, parent_menu):\n\n pass", "def remove_role():\n headers = {\"X-Vault-Token\": args.x_vault_token}\n url = \"{0}/auth/{1}/role/{2}\".format(args.vault_url, args.k8s_cluster_name, args.k8s_namespace)\n print 'Removing role {0} for {1}'.format(args.k8s_namespace, args.k8s_cluster_name)\n send_delete(url=url, headers=headers)", "async def command_unassign_role(self, context, role: str):\n try:\n await context.author.remove_roles(discord.utils.get(context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be unassigned')\n print(f'Errored in command_unassign_role.', e)", "async def remove_role(self, *, reason: str = None):\n await config.member(self.member).set_raw(str(self.role.id), value=None)\n if self.role in self.member.roles:\n try:\n await self.member.remove_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass", "def remove_role(self, principal, role):\n return permissions.utils.remove_local_role(self, principal, role)", "async def roledelete(ctx):\r\n await ctx.message.delete()\r\n roles = ctx.guild.roles\r\n roles.pop(0)\r\n for role in roles:\r\n if ctx.guild.roles[-1] > role:\r\n try:\r\n await role.delete()\r\n except:\r\n print(f\"{Fore.RED}[-]ROLE => {Fore.RESET}Failed to delete: {role}\")", "async def removerole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removerole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"{member} doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n 
colour=discord.Colour.green(),\n )\n )", "async def removerole(self, ctx, rolename, user: discord.Member=None):\n server = ctx.message.server\n author = ctx.message.author\n\n role = self._role_from_string(server, rolename)\n if role is None:\n await self.bot.say(\"Role not found.\")\n return\n\n if user is None:\n user = author\n\n if role in user.roles:\n try:\n await self.bot.remove_roles(user, role)\n await self.bot.say(\"Role successfully removed.\")\n except discord.Forbidden:\n await self.bot.say(\"I don't have permissions to manage roles!\")\n else:\n await self.bot.say(\"User does not have that role.\")", "def remove_role(self, name):\n role = Role.by_name(name)\n if not role:\n return\n if role in self.roles:\n self.roles.remove(role)", "def delete_role(role):\n fallback = Role.load_cli_user()\n\n def _del(cls, col):\n pq = db.session.query(cls)\n pq = pq.filter(col == role.id)\n\n def _repo(cls, col):\n pq = db.session.query(cls).filter(col == role.id)\n pq.update({col: fallback.id}, synchronize_session=False)\n\n _del(Permission, Permission.role_id)\n db.session.delete(role)\n db.session.commit()", "def removeRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='remove', modelType='role')", "def remove_trainee(role_id):\n\n role = Role.query.get(role_id)\n if role is None or role.role_id != RoleIds.Trainee:\n flash(\"Role invalide\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n if role.activity_type not in current_user.get_supervised_activities():\n flash(\"Non autorisé\", \"error\")\n return redirect(url_for(\".leader_list\"))\n\n db.session.delete(role)\n db.session.commit()\n\n return redirect(url_for(\".leader_list\"))", "def delete_menu():", "async def unset(self, ctx, *, role_name: str):\n role_name = role_name.lower()\n\n if isinstance(ctx.message.channel, discord.DMChannel):\n guild = await self.get_server_from_pm(ctx)\n else:\n guild = ctx.guild\n\n if guild is None:\n return\n\n await self.remove_role(ctx, role_name, guild)", "def revoke_role(self, role, principal_ids):", "async def massremove(\n self,\n ctx,\n role: discord.Role,\n member: commands.Greedy[discord.Member],\n ):\n\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help massremove```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in member:\n if role not in i.roles:\n await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{i}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(member)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "def _restoreRole(self, oldRole, args):\n if oldRole:\n args['role'] = oldRole\n 
else:\n del args['role']", "def test_remove_role_from_project_member(self):\n pass", "async def removerole(self, ctx, role: discord.Role):\n guild = ctx.message.guild\n excluded_roles = await self.config.guild(guild).excluded_roles()\n\n if role.id in excluded_roles:\n excluded_roles.remove(role.id)\n await self.config.guild(guild).excluded_roles.set(excluded_roles)\n await ctx.send(\"Removed %s from role exclusion list.\" % role.name)\n else:\n await ctx.send(\"%s is not an excluded role.\" % role.name)", "def delete_role(self, name): # NOQA\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n role_record = self.get_role(name)\n self.client.delete_resource(role_record.get('href'))", "async def remove_roles(self, ctx: commands.Context, *roles: discord.Role):\n if not roles:\n return await ctx.send_help()\n message = \"\"\n removed = []\n not_found = []\n async with self.config.guild(ctx.guild).autoroles() as roles_list:\n for role in roles:\n if role.id in roles_list:\n roles_list.remove(role.id)\n removed.append(role.name)\n else:\n not_found.append(role.name)\n if not_found:\n message += \"\\nRole(s) not found in autorole list: {roles}\".format(\n roles=humanize_list(not_found)\n )\n if removed:\n message += \"\\nRole(s) remove from autorole list: {roles}\".format(\n roles=humanize_list(removed)\n )\n if message:\n for line in pagify(message):\n await ctx.send(line)", "def clean_role():", "def _remove_role(contest, user, role_class):\n user_biv_id = _lookup_user(user).biv_id\n role = role_class.query.select_from(pam.BivAccess).filter(\n pam.BivAccess.source_biv_id == user_biv_id,\n pam.BivAccess.target_biv_id == role_class.biv_id\n ).one()\n db.session.delete(\n pam.BivAccess.query.filter(\n pam.BivAccess.source_biv_id == contest,\n pam.BivAccess.target_biv_id == role.biv_id\n ).one()\n )", "async def on_guild_role_delete(role):\r\n\r\n if role.guild.id not in RULES:\r\n return\r\n\r\n for target, rolesets in RULES[role.guild.id].items():\r\n if role == target:\r\n del RULES[role.guild.id][target]\r\n continue\r\n for i, roles in enumerate(rolesets):\r\n if role in roles:\r\n RULES[role.guild.id][target][i].remove(role)", "def delete_token_role(self, role):\n return self.delete('auth/token/roles/{0}'.format(role))", "async def removepersistrole(self, ctx, member: discord.Member, role: discord.Role):\n role = discord.utils.get(ctx.guild.roles, id=role.id)\n\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to add this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removepersistrole```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n if role not in member.roles:\n return await ctx.channel.send(\n embed=discord.Embed(\n title=f\"*{member}* doesn't have *{role}* Role!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.greyple(),\n )\n )\n\n await member.remove_roles(role)\n persistent_role = Roles(\n bot=self.bot,\n guild_id=ctx.guild.id,\n user_id=member.id,\n roles=role.id,\n )\n # Post to db for persistent role\n await persistent_role.delete()\n\n 
await ctx.send(\n embed=discord.Embed(\n title=f\"Persisting Role *{role}* has been removed from *{member}*\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")" ]
[ "0.76884437", "0.747322", "0.7270676", "0.71565264", "0.712966", "0.70907384", "0.69873995", "0.6899004", "0.6845092", "0.68170446", "0.6810149", "0.68007976", "0.67884195", "0.67334276", "0.6684447", "0.6681211", "0.66589636", "0.66485894", "0.6637888", "0.662638", "0.66152674", "0.6581898", "0.6537222", "0.6490438", "0.64689606", "0.6457329", "0.6389867", "0.6386361", "0.6377928", "0.6347129" ]
0.81111044
0
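
The core step of the passage above is the {"$pull": {"roles": role_doc}} update, which removes a single matching subdocument from the roles array. A plain-Python equivalent of that step, reusing the same for/else lookup; the sample data is made up:

doc = {"_id": 1, "roles": [{"id": 10, "label": "Red"}, {"id": 20, "label": "Blue"}]}
role_id = 20

# Find the subdocument the same way the command does.
for role_doc in doc["roles"]:
    if role_doc["id"] == role_id:
        break
else:
    role_doc = None  # corresponds to the "Role not found in that menu" reply

# Equivalent of the {"$pull": {"roles": role_doc}} update.
if role_doc is not None:
    doc["roles"] = [r for r in doc["roles"] if r != role_doc]

print(doc["roles"])  # [{'id': 10, 'label': 'Red'}]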
Evaluate a postfix expression into a value. Use the postfix_valid function described below to check the validity of the expression
def postfix_eval(postfix_expr):
    s = StackArray()
    expr = postfix_expr.split()
    for token in expr:
        if token[0] in '0123456789':
            res = token
            s.push(res)
        else:  # token is operator
            op2 = s.pop()
            op2 = float(op2)
            if s.is_empty():  # token is ~
                # could also be ~ for non-empty stack
                res = -1 * op2
            else:
                op1 = s.pop()
                op1 = float(op1)
                if token == '^':
                    res = op1 ** op2
                elif token == '~':
                    s.push(op1)
                    res = -1 * op2
                elif token == '*':
                    res = op1 * op2
                elif token == '/':
                    if op2 == 0:
                        raise ZeroDivisionError
                    else:
                        res = op1 / op2
                elif token == '+':
                    res = op1 + op2
                else:  # token == '-'
                    res = op1 - op2
            s.push(res)
    return res
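
A worked check of the evaluator above, assuming StackArray provides the usual push/pop/is_empty operations: for "3 4 + 2 *" it pushes 3 and 4, replaces them with 7.0 on '+', pushes 2, and leaves 14.0 on '*'; the unary '~' negates the top of an otherwise empty stack:

>>> postfix_eval("3 4 + 2 *")
14.0
>>> postfix_eval("5 ~")
-5.0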
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluatePostfixExp(self, postfixExpr):\n\n operandStack = []\n tokenList = postfixExpr.split(\" \")\n\n for token in tokenList:\n if self.isOperand(token):\n if \".\" in token:\n token = float(token)\n else:\n token = int(token)\n operandStack.append(token)\n else: # token is an operator\n operand2 = operandStack.pop()\n operand1 = operandStack.pop()\n try:\n result = self.applyOperator(operand1, operand2, token)\n except Exception as error:\n print(\"Invalid input. Please enter a valid arithmetic expression.\") # Most likely division by\n # zero error.\n return\n operandStack.append(result)\n return operandStack.pop()", "def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()", "def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()", "def postfix_valid(postfix_expr):\n expr = postfix_expr.split()\n count = 0\n if postfix_expr == \"\":\n return False\n for token in expr:\n if token[0] in '0123456789':\n count += 1\n elif token == '~':\n pass\n else: # all other binary operators\n count -= 1\n if count < 0:\n return False\n if count == 1:\n return True\n return False", "def postfix_eval(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. 
\n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n raise PostfixFormatException('Insufficient operands')\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if i in op_list:\n try:\n num_val = stack.pop()\n num_val_initial = stack.pop()\n except IndexError:\n raise PostfixFormatException(\"Insufficient operands\")\n if i == \"+\":\n stack.push(num_val_initial + num_val)\n if i == \"-\":\n stack.push(num_val_initial - num_val)\n if i == \"*\":\n stack.push(num_val_initial * num_val)\n if i == \"/\":\n if num_val == 0:\n raise ValueError(\"0 not divisible\")\n stack.push(num_val_initial / num_val)\n if i == \"**\":\n stack.push(num_val_initial ** num_val)\n if i == \"<<\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial << num_val)\n if i == \">>\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial >> num_val)\n elif new_val.isdigit():\n if \".\" in i:\n stack.push(float(i))\n else:\n stack.push(int(i))\n else:\n raise PostfixFormatException(\"Invalid token\")\n val = stack.pop()\n if not stack.is_empty():\n raise PostfixFormatException(\"Too many operands\")\n return val", "def evaluateExpression(self, userExpression):\n return self.evaluatePostfixExp(userExpression)", "def evaluate_postfix(list_input):\n stack_values = []\n\n for item in list_input:\n # debug stuff\n # print \"item\", item\n try:\n item_value = float(item)\n has_value = True\n except ValueError:\n has_value = False\n\n # value, operand, put on stack\n if has_value:\n stack_values.append(item_value)\n has_value = False\n\n # operator, pull two operands from stack\n elif (has_value == False\n and len(stack_values) >= 2):\n second_value = stack_values.pop()\n first_value = stack_values.pop()\n result = evaluate_op(item,\n first_value,\n second_value)\n stack_values.append(result)\n # debug stuff\n # print \"midstep\", result\n\n return stack_values.pop()", "def evaluatePostfix(postfix, variableList, variableLocation, methodVariables, output):\n\n stack = [] # Stack that will contain our pushed operands from the postfix expression\n immediateCount = 0 # Keeps count of how many immediate values are being expressed (not variables)\n sourceRegister = 1 # Source register starts at 1: \"B\", and increments as needed\n destRegister = 0 # Destination register starts at 0: 'A\" and increments as needed\n immFlag = 0 # Used to determine whether source or destination register holds an immediate\n\n for element in postfix:\n # Evaluate each postfix element one by one to determine appropriate action\n\n if sourceRegister > 6 or destRegister > 6:\n # We cap the total amount of registers used to 7 (0-6)\n raise ValueError(\"Too many operands in formula.\")\n\n if element in OPERATIONS:\n # Here, our element is an operator. 
This means we need to pop the top two values from the stack and\n # execute the given operation.\n operand1, operand2 = stack.pop(), stack.pop()\n\n if operand1 in variableList:\n # The operand is in the list of local variables, so we read the value from memory\n output.write(\" MEMR [4] #\" + str(variableLocation[operand1]) + \" $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif operand1 in methodVariables:\n # The operand is in the list of arguments passed into the method. We consult the methodVariables list\n # to determine the appropriate offset from the stack pointer register S2.\n output.write(\" MOV $A2 $S2\\n\")\n output.write(\" ADD #\" + str(int(methodVariables[operand1][1]) * 4) + \" $A2\\n\")\n output.write(\" MEMR [4] $A2 $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif operand1 in REGISTER_NAMES:\n # This is simply a register that was pushed onto the stack. We can keep it as is\n pass\n\n else:\n # The operand is an immediate value. We test to see if it's a valid integer\n try:\n isinstance(operand1, int)\n immediateCount += 1\n immFlag = 1\n except ValueError as e:\n raise ValueError(\"Invalid operand\")\n\n if operand2 in variableList:\n # The operand is in the list of local variables, so we read the value from memory\n output.write(\" MEMR [4] #\" + str(variableLocation[operand2]) + \" $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n elif operand2 in methodVariables:\n # The operand is in the list of arguments passed into the method. We consult the methodVariables list\n # to determine the appropriate offset from the stack pointer register S2.\n output.write(\" MOV $B2 $S2\\n\")\n output.write(\" ADD #\" + str(int(methodVariables[operand2][1]) * 4) + \" $B2\\n\")\n output.write(\" MEMR [4] $B2 $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n elif operand2 in REGISTER_NAMES:\n # This is simply a register that was pushed onto the stack. We can keep it as is\n pass\n\n else:\n # The operand is an immediate value. We test to see if it's a valid integer\n try:\n isinstance(operand2, int)\n immediateCount += 1\n immFlag = 2\n except ValueError as e:\n raise ValueError(\"Invalid operand\")\n\n if immediateCount == 2:\n # If we have two immediate values, we don't really need to calculate the arithmetic in Capua ASM.\n # We discretely do the calculations in the background and push the value to the stack. This avoids\n # unnecessary processing.\n try:\n stack.append(int(OPERATIONS[element]['function'](float(operand2), float(operand1))))\n\n except ZeroDivisionError:\n raise ValueError(\"Error: Division by zero! - {} {} {}\".format(operand2, element, operand1))\n\n else:\n if immediateCount == 1:\n # only one of the operands was an immediate value. We determine which one is the immediate value,\n # as the correct instruction output depends on it.\n if immFlag == 1:\n output.write(\" MOV #\" + str(int(operand1)) + \" $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif immFlag == 2:\n output.write(\" MOV #\" + str(int(operand2)) + \" $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n else:\n # No operands were immediate values. 
We can do the arithmetic operation as is.\n # We move the source and destination registers up one letter for the next operation\n sourceRegister += 1\n destRegister += 1\n\n output.write(\" \" + INSTRUCTIONS[element] + \" $\" + str(operand1) + \" $\" + str(operand2) + \"\\n\")\n stack.append(operand2)\n\n immediateCount = 0\n\n else:\n # We have an operand to push onto the stack\n stack.append(element)\n\n if len(stack) != 1:\n # If the stack has more than or less than one element, the expression is incorrect.\n raise ValueError(\"invalid expression.\")\n\n # our result is then \"saved\" into register A. The assignment can now be completed.\n result = stack.pop()\n\n if result in REGISTER_NAMES:\n # If we just have a register at the bottom of the stack, we assume the result is already in register A\n pass\n\n else:\n try:\n isinstance(int(result), int)\n output.write(\" MOV #\" + str(result) + \" $A\\n\")\n except ValueError as e:\n raise ValueError(\"Invalid mathematical expression\")", "def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n return stack.pop()", "def postfixCalc(self,tokens):\n if len(tokens) == 0:\n return 0\n stack = []\n # while expr is not empty\n while len(tokens)>0:\n toke = tokens.pop(0)\n # if token is a number push it onto the stack\n if isFloat(toke):\n stack.append(float(toke))\n # if token is a special number push it onto the stack\n elif toke in Calculator.specialNumbers:\n stack.append(Calculator.specialNumbers[toke])\n else:\n # Operators take 2 inputs, 
functions take 1 input except root which takes 2\n if toke in Calculator.operators or toke == 'root':\n n = 2\n elif toke in Calculator.functions:\n n = 1\n # If the length of the stack is less than the required number of operators the user has not \n # input enough values.\n if len(stack)<n:\n return \"Too Few Error\"\n # Pop the top n numbers from the stack\n popedVals = []\n for i in range(n):\n popedVals.append(stack.pop())\n # Evaluate the operator using the number(s) that were popped, and push back onto the stack\n if n == 2 and toke in Calculator.operators:\n stack.append(Calculator.operators[toke][0](popedVals[1], popedVals[0]))\n elif n == 2:\n stack.append(Calculator.functions[toke](popedVals[1], popedVals[0]))\n elif n == 1:\n stack.append(Calculator.functions[toke](popedVals[0]))\n # If there is more than one value left on the stack the user has input too many values\n if len(stack) > 1:\n return \"Too Many Error\"\n # Return the value on the stack (should only be 1 value left)\n return stack[-1]", "def evaluate_infix(string):\n return postfix(infix_to_postfix(string))", "def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1", "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = 
False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix", "def infix_to_postfix(expr):\n ops = Stack()\n postfix = []\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)", "def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off 
stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix", "def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix", "def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # 
Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def postfix(t_input):\r\n # check whether the elements contain invalid characters\r\n if is_valid(t_input) == 1:\r\n # return Invalid if invalid characters were found\r\n result = \"Invalid\"\r\n return result\r\n\r\n # scan the elements again\r\n # NOTE: a single loop would be more efficient\r\n for element in t_input.strip(\"\\0\").split(\" \"):\r\n if element in [\"-\", \"+\", \"*\", \"/\"]:\r\n # found an operator, fetch the operands from the stack\r\n right_operand = stack.pop()\r\n left_operand = stack.pop()\r\n\r\n # perform the required operation\r\n if element == \"-\":\r\n op_result = left_operand - right_operand\r\n elif element == \"+\":\r\n op_result = left_operand + right_operand\r\n elif element == \"*\":\r\n op_result = left_operand * right_operand\r\n else:\r\n op_result = left_operand // right_operand\r\n\r\n if boold:\r\n print(\"[DEBUG] Found operator '{}': {} {} {} = {}\".format(element, left_operand, element, right_operand, op_result))\r\n # push the result of the operation onto the stack\r\n stack.push(op_result)\r\n else:\r\n # found an operand, push it onto the stack\r\n # > NOTE: string -> integer conversion is required\r\n stack.push(int(element))\r\n \r\n if boold:\r\n stack.print()\r\n\r\n # the result is the last element\r\n # > NOTE: integer -> string conversion is required\r\n result = str(stack.pop())\r\n return result",
"def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... 
(Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output", "def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", \"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix", "def infixToPostfix(expr, prec):\n ops = Stack()\n postfix = []\n toks = expr.split()\n for t in toks:\n if t.isdigit():\n postfix.append(t)\n elif t == '(':\n ops.push('(')\n elif t == ')':\n op = ops.pop()\n while op != '(':\n postfix.append(op)\n op = ops.pop()\n else:\n while True:\n if ops.empty() or ops.peek() == '(':\n ops.push(t)\n break\n if prec[t] > prec[ops.peek()]:\n ops.push(t)\n break\n elif prec[t] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(t)\n break\n else:\n postfix.append(ops.pop())\n while not ops.empty():\n postfix.append(ops.pop())\n return postfix", "def isPostfixOp(tokens):\n stop = SwiftSupport.getLastOpTokenIndex(tokens)\n if stop == -1:\n return False\n\n start = tokens.index\n prevToken = tokens.get(start - 1)\n nextToken = tokens.get(stop + 1)\n prevIsWS = SwiftSupport.isLeftOperatorWS(prevToken)\n nextIsWS = SwiftSupport.isRightOperatorWS(nextToken)\n result = not prevIsWS and nextIsWS or not prevIsWS and nextToken.type == DOT\n text = tokens.getText(start, stop)\n return result", "def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str", "def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. -3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res", "def postfix(self,Line):\r\n\r\n stak = []\r\n expression = []\r\n infix = []\r\n i=0\r\n while( i <(len(Line))):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n if len(stak) > 0:\r\n if (Line[i] == '[') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"dup\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"type\")):\r\n return False\r\n if len(stak) > 0:\r\n if (Line[i] == '(') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\")):\r\n return False\r\n if (len(stak) == 0) and (Line[i] == '('):\r\n return False\r\n stak.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(stak) == 0:\r\n return False\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n 
if (stak[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (stak[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ']'):\r\n break\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n\r\n stak = stak[:-1]\r\n if (len(stak) > 0) and (stak[stak.__len__() - 1] == 'dup'):\r\n expression.append(stak[stak.__len__() - 1])\r\n stak = stak[:-1]\r\n elif Line[i] == ',':\r\n if expression.__len__() == 0:\r\n return False\r\n if stak.__len__() != 0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n expression = []\r\n elif Line[i][0].isdecimal():\r\n if Line[i][len(Line[i]) - 1] == 'h':\r\n tmp = extra_functions.is_hexa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n\r\n elif Line[i][len(Line[i]) - 1] == 'o':\r\n tmp = extra_functions.is_octa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'b':\r\n tmp = extra_functions.is_binary(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'd':\r\n tmp = int(Line[i][:-1], 10)\r\n expression.append(tmp)\r\n elif Line[i].isdecimal():\r\n expression.append(int(Line[i]))\r\n else:\r\n return False\r\n elif (Line[i] == \"lengthof\") or (Line[i] == \"sizeof\") or (Line[i] == \"type\") or (Line[i] == \"dup\"):\r\n if (Line[i] == \"dup\"):\r\n if stak.__len__()>0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n S = []\r\n L = []\r\n i = 1 + i\r\n while (i < len(Line)):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n S.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(S) == 0:\r\n return False\r\n j = len(S) - 1\r\n while j >= 0:\r\n if (S[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (S[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ']'):\r\n break\r\n S = S[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n S = S[:-1]\r\n\r\n L.append(Line[i])\r\n if len(S) == 0:\r\n break\r\n i += 1\r\n if L.__len__() > 1:\r\n if (L[L.__len__() - 1] == ')') and (L[0] == '('):\r\n L = L[:-1]\r\n L = L[1:]\r\n else:\r\n return False\r\n else:\r\n return False\r\n tmp = self.postfix(L)\r\n i = i + 1\r\n if tmp != False:\r\n tmp1 = self.Calc_infix(expression)\r\n if tmp1 != False:\r\n for j in range(0, tmp1[0]):\r\n infix = infix + tmp\r\n else:\r\n return False\r\n else:\r\n return False\r\n expression=[\"dup\"]\r\n continue\r\n stak.append(Line[i])\r\n else:\r\n if (Line[i] == '*') | (Line[i] == '-') | (Line[i] == '/') | (Line[i] == '+'):\r\n if len(stak) > 0:\r\n j = len(stak) - 1\r\n while (j >= 0):\r\n if ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n break\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif (stak[j] == 'dup') | 
(stak[j] == 'lengthof') | (stak[j] == 'type') | (stak[j] == 'sizeof'):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n else:\r\n break\r\n j = j - 1\r\n stak.append(Line[i])\r\n else:\r\n expression.append(Line[i])\r\n i += 1\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') or (stak[j] == '['):\r\n return False\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n return infix", "def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix", "def resolve_expression(self):\n stack = list()\n\n for element in self._get_postfix_notation():\n if element in self.OPERATORS: # get two elements from top of stack, push result of operation on stack\n operand_a = stack.pop()\n operand_b = stack.pop()\n value = self._calculate(operand_b, operand_a, element)\n stack.append(value)\n else: # push to stack if number\n stack.append(element)\n\n return stack.pop()" ]
[ "0.7719225", "0.74093944", "0.7368014", "0.7160753", "0.7056071", "0.6832468", "0.680652", "0.67106664", "0.67052156", "0.6677665", "0.6639799", "0.65545446", "0.64869434", "0.6457488", "0.64207363", "0.64158213", "0.6396951", "0.63920987", "0.6357941", "0.6345688", "0.63173926", "0.62611854", "0.62564194", "0.62098736", "0.61826485", "0.6181747", "0.61276793", "0.609662", "0.60883844", "0.5916656" ]
0.7796605
0
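Every code snippet in the preceding row is a variation on the same pair of stack techniques: shunting-yard conversion of space-separated infix tokens into postfix, followed by stack evaluation of the postfix string. As a compact reference for both, here is a self-contained sketch; it assumes binary, left-associative operators and parenthesized, space-separated input, and all names are my own rather than drawn from any snippet above.

import operator

OPS = {"+": (1, operator.add), "-": (1, operator.sub),
       "*": (2, operator.mul), "/": (2, operator.truediv)}

def to_postfix(expr):
    out, stack = [], []
    for tok in expr.split():
        if tok in OPS:
            # pop operators of greater or equal precedence before pushing
            while stack and stack[-1] in OPS and OPS[stack[-1]][0] >= OPS[tok][0]:
                out.append(stack.pop())
            stack.append(tok)
        elif tok == "(":
            stack.append(tok)
        elif tok == ")":
            # unwind back to the matching "(" and discard it
            while stack[-1] != "(":
                out.append(stack.pop())
            stack.pop()
        else:  # operand
            out.append(tok)
    while stack:
        out.append(stack.pop())
    return " ".join(out)

def eval_postfix(expr):
    stack = []
    for tok in expr.split():
        if tok in OPS:
            b, a = stack.pop(), stack.pop()  # right operand is on top
            stack.append(OPS[tok][1](a, b))
        else:
            stack.append(float(tok))
    return stack.pop()

print(to_postfix("( 3 + 4 ) * 2"))  # 3 4 + 2 *
print(eval_postfix("3 4 + 2 *"))    # 14.0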
To test for an invalid postfix expression. You may assume that what is passed in is a string that only contains numbers and operators. These are separated into valid tokens by spaces so you can use split and join as necessary.
def postfix_valid(postfix_expr): expr = postfix_expr.split() count = 0 if postfix_expr == "": return False for token in expr: if token[0] in '0123456789': count += 1 elif token == '~': pass else: # all other binary operators count -= 1 if count < 0: return False if count == 1: return True return False
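A few illustrative calls against the validator above, with outcomes worked out from its counting rule (the test strings are hypothetical, not drawn from this dataset):

# assumes postfix_valid from the document field above is in scope
for expr in ["3 4 +", "3 4 + 5", "3 +", "5 ~", ""]:
    print(repr(expr), "->", postfix_valid(expr))
# '3 4 +'   -> True   (exactly one value would remain on the stack)
# '3 4 + 5' -> False  (count ends at 2: too many operands)
# '3 +'     -> False  (binary operator with only one operand)
# '5 ~'     -> True   (unary ~ consumes no extra operand)
# ''        -> False  (empty expressions are rejected up front)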
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(string):\n \n tokens = string.split()\n \n # Remembers if the previous token was an operator\n opflag = True\n \n ## Highly inefficient validity checking begins here ##\n \n # List of operators as they would appear in the infix expression\n operators = ['+', '-', '*', '/', '^', 'sqrt']\n \n # First and foremost, detect all unary minus signs and mark them as such\n for i in xrange(len(tokens)):\n # A unary minus is a minus operator which occurs after another operator\n # or after an open parenthesis.\n if tokens[i] in operators or tokens[i] == '(':\n if opflag:\n if tokens[i] == '-':\n tokens[i] = 'u-'\n # Leave opflag true to allow cascading of unary minuses\n elif tokens[i] in ['sqrt', '(']:\n # These operators can be cascaded, so leave them alone\n # Also, leave opflag true to handle a subsequent u-\n pass\n else:\n # Any other operator must be caught\n raise ExpressionError('Operators cannot be cascaded!')\n # We found an operator, but opflag isn't true. Set it.\n else:\n opflag = True\n else:\n # We found something other than an operator, or a ')'. If opflag is\n # false, and the token is not ')', then we have two adjacent\n # variables/numbers. This is also an invalid combination\n if not opflag and tokens[i] != ')':\n raise ExpressionError('Adjacent operands with no operator!')\n # Otherwise, unset opflag\n else:\n opflag = False\n \n # Check whether parentheses match\n s = Stack()\n for token in tokens:\n if token == '(':\n s.push(token)\n elif token == ')':\n if s.pop() != '(':\n raise ExpressionError('Parentheses do not match')\n if not s.is_empty():\n raise ExpressionError('Parentheses do not match')\n \n return tokens", "def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. 
\n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()", "def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()", "def postfix_eval(postfix_expr):\n s = StackArray()\n expr = postfix_expr.split()\n for token in expr:\n if token[0] in '0123456789':\n res = token\n s.push(res)\n else: # token is operator\n op2 = s.pop()\n op2 = float(op2)\n if s.is_empty(): # token is ~\n # could also be ~ for non-empty stack\n res = -1 * op2\n else:\n op1 = s.pop()\n op1 = float(op1)\n if token == '^':\n res = op1 ** op2\n elif token == '~':\n s.push(op1)\n res = -1 * op2\n elif token == '*':\n res = op1 * op2\n elif token == '/':\n if op2 == 0:\n raise ZeroDivisionError\n else:\n res = op1 / op2\n elif token == '+':\n res = op1 + op2\n else: # token == '-'\n res = op1 - op2\n s.push(res)\n return res", "def evaluatePostfixExp(self, postfixExpr):\n\n operandStack = []\n tokenList = postfixExpr.split(\" \")\n\n for token in tokenList:\n if self.isOperand(token):\n if \".\" in token:\n token = float(token)\n else:\n token = int(token)\n operandStack.append(token)\n else: # token is an operator\n operand2 = operandStack.pop()\n operand1 = operandStack.pop()\n try:\n result = self.applyOperator(operand1, operand2, token)\n except Exception as error:\n print(\"Invalid input. Please enter a valid arithmetic expression.\") # Most likely division by\n # zero error.\n return\n operandStack.append(result)\n return operandStack.pop()", "def validate_expression(str):\n stack = []\n pushChars, popChars = \"([\", \")]\"\n for c in str:\n if c in pushChars:\n stack.append(c)\n elif c in popChars:\n if not len(stack):\n raise ValidationError('La expresion tiene corchetes \\'[ ]\\' o parentesis \\'( )\\' sin cerrar')\n else:\n stackTop = stack.pop()\n balancingBracket = pushChars[popChars.index(c)]\n if stackTop != balancingBracket:\n raise ValidationError('La expresion tiene corchetes \\'[ ]\\' o parentesis \\'( )\\' sin cerrar')\n\n if len(stack):\n raise ValidationError('La expresion tiene corchetes \\'[ ]\\' o parentesis \\'( )\\' sin cerrar')", "def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def postfix_eval(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. 
\n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n raise PostfixFormatException('Insufficient operands')\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if i in op_list:\n try:\n num_val = stack.pop()\n num_val_initial = stack.pop()\n except IndexError:\n raise PostfixFormatException(\"Insufficient operands\")\n if i == \"+\":\n stack.push(num_val_initial + num_val)\n if i == \"-\":\n stack.push(num_val_initial - num_val)\n if i == \"*\":\n stack.push(num_val_initial * num_val)\n if i == \"/\":\n if num_val == 0:\n raise ValueError(\"0 not divisible\")\n stack.push(num_val_initial / num_val)\n if i == \"**\":\n stack.push(num_val_initial ** num_val)\n if i == \"<<\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial << num_val)\n if i == \">>\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial >> num_val)\n elif new_val.isdigit():\n if \".\" in i:\n stack.push(float(i))\n else:\n stack.push(int(i))\n else:\n raise PostfixFormatException(\"Invalid token\")\n val = stack.pop()\n if not stack.is_empty():\n raise PostfixFormatException(\"Too many operands\")\n return val", "def isPostfixOp(tokens):\n stop = SwiftSupport.getLastOpTokenIndex(tokens)\n if stop == -1:\n return False\n\n start = tokens.index\n prevToken = tokens.get(start - 1)\n nextToken = tokens.get(stop + 1)\n prevIsWS = SwiftSupport.isLeftOperatorWS(prevToken)\n nextIsWS = SwiftSupport.isRightOperatorWS(nextToken)\n result = not prevIsWS and nextIsWS or not prevIsWS and nextToken.type == DOT\n text = tokens.getText(start, stop)\n return result", "def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n return stack.pop()", "def valid_expression(expression):\n OPERATORS= '+*/-'\n if no_operators(expression) != True:\n return no_operators(expression)\n if no_paranthesis(expression) != True:\n return no_paranthesis(expression)\n if no_numbers(expression) != True:\n return no_numbers(expression)\n if invalid_characters(expression) != True:\n return invalid_characters(expression)\n if match_paranthesis(expression) == False:\n raise NotValidExpression('Not a valid expression, brackets mismatched.')\n number_operators = 0\n number_paranthesis = 0\n for i in expression:\n if i in OPERATORS:\n number_operators += 1\n elif i == '(' or i == ')':\n number_paranthesis +=1\n expression1 = expression[1:(len(expression) - 1)] # checks if the expression without the first and last character is valid\n if match_paranthesis(expression1) == False and ('(' in expression1 or ')' in expression1):\n raise NotValidExpression('Not a valid expression, brackets mismatched.') # if it is not, raises an appropiate error\n for i in range(0, len(expression) - 1):\n #Checks if an operator is missing,if there exists a number followed by ( or if there is a )before the number\n if expression[i] not in 
OPERATORS and expression[i] not in '()':\n if expression[i + 1] == '(':\n raise NotValidExpression('Not a valid expression, operator missing.')\n elif expression[i] in OPERATORS and expression[i + 1] in OPERATORS + ')' :\n raise NotValidExpression('Not a valid expression, wrong placement of operators')\n #Checks if an operator is placed wrongly , before ) or next to another operator\n if expression[i+1] not in OPERATORS and expression[i + 1] not in '()':\n if expression[i] == ')':\n raise NotValidExpression('Not a valid expression, operator missing.')\n elif expression[i+1] in OPERATORS and expression[i] in OPERATORS + '(':\n raise NotValidExpression('Not a valid expression, wrong placement of operators')\n if 2*number_operators != number_paranthesis: # an expression is valid only if the number of paranthesis is equal to the double of the number of operators\n raise NotValidExpression('Not a valid expression, wrong number of operands.')\n return True", "def isOperand(self, token):\n if len(token) == 1:\n if token in self.operands:\n return True\n elif len(token) > 1:\n validChars = self.operands + '+-'\n for eachChar in token:\n if eachChar not in validChars:\n return False\n return True", "def evaluate_infix(string):\n return postfix(infix_to_postfix(string))", "def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n 
postfix.append(stack.pop())\n return postfix", "def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def postfix(t_input):\r\n # check whether the elements contain invalid characters\r\n if is_valid(t_input) == 1:\r\n # return Invalid if invalid characters were found\r\n result = \"Invalid\"\r\n return result\r\n\r\n # scan the elements again\r\n # NOTE: a single loop would be more efficient\r\n for element in t_input.strip(\"\\0\").split(\" \"):\r\n if element in [\"-\", \"+\", \"*\", \"/\"]:\r\n # found an operator, fetch the operands from the stack\r\n right_operand = stack.pop()\r\n left_operand = stack.pop()\r\n\r\n # perform the required operation\r\n if element == \"-\":\r\n op_result = left_operand - right_operand\r\n elif element == \"+\":\r\n op_result = left_operand + right_operand\r\n elif element == \"*\":\r\n op_result = left_operand * right_operand\r\n else:\r\n op_result = left_operand // right_operand\r\n\r\n if boold:\r\n print(\"[DEBUG] Found operator '{}': {} {} {} = {}\".format(element, left_operand, element, right_operand, op_result))\r\n # push the result of the operation onto the stack\r\n stack.push(op_result)\r\n else:\r\n # found an operand, push it onto the stack\r\n # > NOTE: string -> integer conversion is required\r\n stack.push(int(element))\r\n \r\n if boold:\r\n stack.print()\r\n\r\n # the result is the last element\r\n # > NOTE: integer -> string conversion is required\r\n result = str(stack.pop())\r\n return result", "def no_operators(expression):\n OPERATORS = set('+-*/')\n for i in expression:\n if i in OPERATORS:\n return True\n raise NotValidExpression('Not a valid expression, no operators')", "def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str", "def evaluate(s:str)->str:\n t = s.split()\n res = ''\n\n # Check valid operator \n if t[1] not in ['+','-']:\n return \"Error: Operator must be '+' or '-'.\"\n\n # check valid number \n try:\n t1 = int(t[0])\n t2 = int(t[2])\n \n except ValueError:\n return \"Error: Numbers must only contain digits.\"\n\n # check if numbers are 4 digits \n if (t1>9999 or t1 < -9999 or t2>9999 or t2<-9999):\n return \"Error: Numbers cannot be more than four digits.\"\n \n # addition \n if t[1] == '+':\n res = t1 + t2\n return str(res)\n \n # subtraction \n elif t[1] == '-':\n res = t1 -t2\n return str(res)", "def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n 
operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... (Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. 
-3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... 
exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def parse(expr, whitelist):\n # remove all whitespace\n expr = re.sub(r'\\s+', '', expr)\n\n seq = []\n parsed = []\n for ch in expr:\n if ch in valid_chars:\n seq.append(ch)\n elif ch in operators or ch.isdigit():\n if seq:\n sym = process_sequence(seq, whitelist)\n parsed.append(sym)\n seq = []\n\n # power operator\n if ch == '^':\n ch = '**'\n\n parsed.append(ch)\n else:\n raise ValueError('Illegal character: \"{}\"'.format(ch))\n\n if seq:\n parsed.append(process_sequence(seq, whitelist))\n return ''.join(parsed)", "def postfixCalc(self,tokens):\n if len(tokens) == 0:\n return 0\n stack = []\n # while expr is not empty\n while len(tokens)>0:\n toke = tokens.pop(0)\n # if token is a number push it onto the stack\n if isFloat(toke):\n stack.append(float(toke))\n # if token is a special number push it onto the stack\n elif toke in Calculator.specialNumbers:\n stack.append(Calculator.specialNumbers[toke])\n else:\n # Operators take 2 inputs, functions take 1 input except root which takes 2\n if toke in Calculator.operators or toke == 'root':\n n = 2\n elif toke in Calculator.functions:\n n = 1\n # If the length of the stack is less than the required number of operators the user has not \n # input enough values.\n if len(stack)<n:\n return \"Too Few Error\"\n # Pop the top n numbers from the stack\n popedVals = []\n for i in range(n):\n popedVals.append(stack.pop())\n # Evaluate the operator using the number(s) that were popped, and push back onto the stack\n if n == 2 and toke in Calculator.operators:\n stack.append(Calculator.operators[toke][0](popedVals[1], popedVals[0]))\n elif n == 2:\n stack.append(Calculator.functions[toke](popedVals[1], popedVals[0]))\n elif n == 1:\n stack.append(Calculator.functions[toke](popedVals[0]))\n # If there is more than one value left on the stack the user has input too many values\n if len(stack) > 1:\n return \"Too Many Error\"\n # Return the value on the stack (should only be 1 value left)\n return stack[-1]", "def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)", "def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n 
opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)", "def validateOperator(self, tokens):\n return tokens", "def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix" ]
[ "0.7520136", "0.6987341", "0.69246596", "0.6914398", "0.67455107", "0.6717881", "0.6708455", "0.67051095", "0.66992074", "0.6603976", "0.6578779", "0.6534092", "0.646224", "0.6458865", "0.6412046", "0.63550186", "0.63059497", "0.62621725", "0.6261374", "0.6159734", "0.6157778", "0.6133061", "0.61075544", "0.6103541", "0.6096212", "0.6085565", "0.6085177", "0.60186964", "0.6000831", "0.5983782" ]
0.7960342
0
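Several of this row's negatives (the postfix_eval variants) go a step further than validation and evaluate the expression, raising on the same malformed shapes the query describes. A minimal checked evaluator in that spirit; the function name and error messages are illustrative rather than taken from any snippet above.

def eval_postfix_checked(expr):
    """Evaluate a space-separated postfix expression, raising ValueError
    on the malformed cases the surrounding snippets test for."""
    ops = {"+": lambda a, b: a + b, "-": lambda a, b: a - b,
           "*": lambda a, b: a * b, "/": lambda a, b: a / b}
    stack = []
    for tok in expr.split():
        if tok in ops:
            if len(stack) < 2:
                raise ValueError("insufficient operands")
            b, a = stack.pop(), stack.pop()  # right operand is on top
            stack.append(ops[tok](a, b))
        else:
            try:
                stack.append(float(tok))
            except ValueError:
                raise ValueError("invalid token: %r" % tok)
    if len(stack) != 1:
        raise ValueError("too many operands")
    return stack[0]

print(eval_postfix_checked("3 4 + 5 *"))  # 35.0
try:
    eval_postfix_checked("3 4 + 5")
except ValueError as exc:
    print(exc)  # too many operands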
Handles bootstrapping of system restart for exchange resources and broker state. Ensures ExchangePoint and ExchangeSpace resources in the system have a properly declared AMQP exchange. Ensures ExchangeName resources in the system have a properly declared queue. Logs all exchanges/queues it didn't understand. Purges all service queues as long as no consumers are attached; this can be overridden with force=True on the pycc command line.
def on_restart(self, process, config, **kwargs): ex_manager = process.container.ex_manager old_use_ems = ex_manager.use_ems ex_manager.use_ems = False # get list of queues from broker with full props that have to do with our sysname all_queues = ex_manager._list_queues() queues = {q['name']:q for q in all_queues if q['name'].startswith(get_sys_name())} # get list of exchanges from broker with full props all_exchanges = ex_manager._list_exchanges() exchanges = {e['name']:e for e in all_exchanges if e['name'].startswith(get_sys_name())} # now get list of XOs from RR xs_objs, _ = process.container.resource_registry.find_resources(RT.ExchangeSpace) xp_objs, _ = process.container.resource_registry.find_resources(RT.ExchangePoint) xn_objs, _ = process.container.resource_registry.find_resources(RT.ExchangeName) # # VERIFY XSs have a declared exchange # rem_exchanges = set(exchanges) for rrxs in xs_objs: xs = ExchangeSpace(ex_manager, ex_manager._priviledged_transport, rrxs.name) if xs.exchange in rem_exchanges: rem_exchanges.remove(xs.exchange) else: log.warn("BootstrapExchange restart: RR XS %s, id: %s NOT FOUND in exchanges", rrxs.name, rrxs._id) for rrxp in xp_objs: xp = ExchangePoint(ex_manager, ex_manager._priviledged_transport, rrxp.name) if xp.exchange in rem_exchanges: rem_exchanges.remove(xp.exchange) else: log.warn("BootstrapExchange restart: RR XP %s, id %s NOT FOUND in exchanges", rrxp.name, rrxp._id) # events and main service exchange should be left if get_sys_name() in rem_exchanges: rem_exchanges.remove(get_sys_name()) else: log.warn("BootstrapExchange restart: no main service exchange %s", get_sys_name()) event_ex = "%s.pyon.events" % get_sys_name() if event_ex in rem_exchanges: rem_exchanges.remove(event_ex) else: log.warn("BootstrapExchange restart: no events exchange %s", event_ex) # what is left? 
for exchange in rem_exchanges: log.warn("BootstrapExchange restart: unknown exchange on broker %s", exchange) # # VERIFY XNs have a declared queue # rem_queues = set(queues) for rrxn in xn_objs: # can instantiate ExchangeNames, don't need specific types # @TODO: most queue types have a name instead of anon """ # @TODO: except queue type, which needs to be fixed to record declared name type if rrxn.xn_type == "QUEUE": log.info("TODO: queue type XNs, %s", rrxn.name) continue """ exchange_space_list, assoc_list = process.container.resource_registry.find_subjects(RT.ExchangeSpace, PRED.hasExchangeName, rrxn._id) if not len(exchange_space_list) == 1: raise StandardError("Associated Exchange Space to Exchange Name %s does not exist" % rrxn._id) rrxs = exchange_space_list[0] xs = ExchangeSpace(ex_manager, ex_manager._priviledged_transport, rrxs.name) xn = ExchangeName(ex_manager, ex_manager._priviledged_transport, rrxn.name, xs) if xn.queue in rem_queues: rem_queues.remove(xn.queue) else: log.warn("BootstrapExchange restart: RR XN %s, type %s, id %s NOT FOUND in queues", xn.queue, xn.xn_type, xn._id) # get list of service name possibilities svc_objs, _ = process.container.resource_registry.find_resources(RT.ServiceDefinition) svc_names = [s.name for s in svc_objs] # PROCESS QUEUES + SERVICE QUEUES- not yet represented by resource proc_queues = set() svc_queues = set() for queue in list(rem_queues): # PROCESS QUEUES: proc manager spawned # pattern "<sysname>.<containerid>.<pid>" pieces = queue.split(".") if len(pieces) > 2 and pieces[-1].isdigit(): proc_queues.add(queue) rem_queues.remove(queue) continue # SERVICE QUEUES # pattern "<sysname>.<service name>" if len(pieces) == 2: if pieces[-1] in svc_names: svc_queues.add(queue) rem_queues.remove(queue) # @TODO: PD-spawned process queues # pattern "<sysname>.<service_name><hex>" # leftover queues now for queue in rem_queues: log.warn("Unknown queue: %s", queue) # # EMPTY SERVICE QUEUES # for queue in svc_queues: if int(queues[queue]['consumers']) > 0 and not process.CFG.get_safe('force', False): log.warn("Refusing to empty service queue %s with consumers (%s), specify force=True to override", queue, queues[queue]['consumers']) else: ex_manager.purge_queue(queue) log.info("Purged service queue %s of %s messages", queue, queues[queue]['messages']) ex_manager.use_ems = old_use_ems
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_initial_bootstrap(self, process, config, **kwargs):\n\n # get default org_id\n # @TODO: single org assumed for now\n org_ids = process.container.resource_registry.find_resources(RT.Org, id_only=True)\n if not (len(org_ids) and len(org_ids[0]) == 1):\n raise StandardError(\"Could not determine org_id\")\n\n org_id = org_ids[0][0]\n\n ems_client = ExchangeManagementServiceProcessClient(process=process)\n\n #\n # Create XSs and XPs\n #\n for xsname, xsdict in config.get_safe('exchange_spaces', {}).iteritems():\n xso = ResExchangeSpace(name=xsname)\n xso_id = ems_client.create_exchange_space(xso, org_id)\n\n log.info(\"ExchangeSpace %s, id %s\", xsname, xso_id)\n\n for xpname, xpopts in xsdict.get('exchange_points', {}).iteritems():\n\n # @TODO: some translation for types CFG currentl has it as \"topic_tree\" and we've been using \"ttree\"\n ttype = xpopts.get('type', 'topic_tree')\n if ttype == \"topic_tree\":\n ttype = \"ttree\"\n\n xpo = ResExchangePoint(name=xpname, topology_type=ttype)\n xpo_id = ems_client.create_exchange_point(xpo, xso_id)\n\n log.info(\"\\tExchangePoint %s, id %s\", xpname, xpo_id)\n\n #\n # Create and associate brokers with XSs\n #\n for brokername in xsdict.get('brokers', []):\n xbo = ResExchangeBroker(name=brokername)\n xbo_id = ems_client.create_exchange_broker(xbo)\n\n log.info(\"\\tExchangeBroker %s, id %s\", brokername, xbo_id)\n\n # directly associate broker with XS\n # @TODO: should EMS provide this?\n # first find out if the assoc exists already\n assocs = process.container.resource_registry.find_associations(xso_id, PRED.hasExchangeBroker, id_only=True)\n if len(assocs) > 0:\n continue\n process.container.resource_registry.create_association(xso_id, PRED.hasExchangeBroker, xbo_id)", "def configure_rabbit (ec2_conn,base_name,params ):\n print \"configuring rabbitmq exchanges and Queues\"\n app_type = 'RABBITMQ'\n \n logging.basicConfig()\n \n ## Allow security from build server to rabbitmq\n rabbit_lb_sec_grp_name = get_lb_secgrp_name( base_name, app_type )\n rabbit_lb_sec_grp = find_secgrp(ec2_conn, rabbit_lb_sec_grp_name)\n \n try :\n rabbit_lb_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 8443,\n to_port = 8443,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n \n rabbitmq_host = params.get( 'host' )\n rabbitmq_port = params.get( 'port' )\n rabbitmq_username = params.get( 'user-name' )\n rabbitmq_password = params.get( 'password' )\n exchanges = params.get( 'exchanges' )\n \n amqp_url='amqp://'+rabbitmq_username+':'+rabbitmq_password+'@'+rabbitmq_host+':'+rabbitmq_port+'/%2f'\n amqp_url = str(amqp_url)\n parameters = pika.URLParameters(amqp_url)\n connection = pika.BlockingConnection(parameters)\n channel = connection.channel()\n \n \n \n for exchnage in exchanges :\n exchange_name = exchnage.get(\"name\")\n exchange_type = exchnage.get(\"type\")\n queue_name = exchnage.get(\"queue\")\n routings = exchnage.get(\"bindings\")\n channel.exchange_declare(exchange=exchange_name,\n exchange_type=exchange_type,\n durable=True )\n channel.queue_declare(queue=queue_name,\n durable=True)\n for routing in routings :\n channel.queue_bind(queue=queue_name, exchange=exchange_name, routing_key=routing)\n print \"binging exchnage: \" +exchange_name+\", to a queue:\"+queue_name+\" ,with routing key:\"+routing\n \n ## close connection at the end \n connection.close()\n \n ## At the end revoke the build server rule \n try :\n rabbit_lb_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 8443,\n to_port = 8443,\n cidr_ip = 
build_server_cidr)\n \n except :\n print \"exception removing rule\"", "def setup_exchange(self):\n self.logger.info('declaring exchange %s', self.exchange)\n self._channel.exchange_declare(self.on_exchange_declareok, self.exchange, self.exchange_type)", "def setup_queues_and_bindings(self):\n self._channel.exchange_declare(self.setup_queue, exchange=self.exchange, passive=True)", "def test_redelivery_of_rejected_messages_after_restart(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 9999)\n localConfig.requeue_delay = 1\n localConfig.submit_sm_throughput = 1\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 4 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 4:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n msgid = yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 5 seconds before stopping\n yield waitFor(5)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(5)\n\n # Save the count before starting the connector\n _submitRecordsCount = len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Wait for 5 seconds before starting again\n yield waitFor(5)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 10 seconds before stopping , all the rest of the queue must be sent\n yield waitFor(10)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(10)\n\n # Update the counter\n _submitRecordsCount += len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Assertions\n self.assertEqual(_submitRecordsCount, 4)", "def perform_setup():\n global credentials, connection, channel\n credentials = pika.PlainCredentials('guest', 'guest') # AUTH via Default guest user on RabbitMQ\n connection = pika.BlockingConnection(pika.ConnectionParameters(\"127.0.0.1\", 5672, '/', credentials)) # Using rabbit-mq container name to access the RabbitMQ container from other containers\n channel = connection.channel()\n channel.queue_declare(queue='poll', durable=True)", "def on_exchange_declareok(self, unused_frame):\n self.logger.info('exchange declared')\n self.setup_queue()", "def setup_exchange(self):\n LOGGER.info('Setting the exchange with name :%s and type :%s',\n self._exchange, self._type)\n if self._channel is None:\n raise ChannelDoesntExist('The channel doesn''t exist')\n\n if len(self._exchange) < 3:\n raise ExchangeNameDoesntMatch('This exchange name does''nt match')\n # Check if the channel doesn't exist on rabbit\n\n list_rabbit_exchange = [] # Correct me\n if self._exchange in list_rabbit_exchange:\n raise ExchangeAlreadyExist('This exchange is already exist')\n\n # Check Me : self._channel.basic_qos(prefetch_count=1)\n self._channel.exchange_declare(exchange=self._exchange,\n type=self._type,\n durable=self._durable,\n auto_delete=self._auto_delete)", "def schedule_system_restart():\n global _force_system_restart\n _force_system_restart = True", "def install_rabbit (vpc_conn,ec2_conn, elb_conn, cloudwatch_conn , r53_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'RABBITMQ'\n app_name = 'RABBITMQ'\n external_type = 'RABBITMQ-EXT'\n rabbit_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n rabbit_ami_name = base_name + \"-\" + app_type\n rabbit_sec_grp_name = get_secgrp_name( base_name, app_type )\n 
rabbit_lb_sec_grp_name = get_lb_secgrp_name( base_name, app_type )\n nat_sec_grp_name = get_secgrp_name(base_name, \"NAT\")\n elb_listeners = [ ( '80', '80', 'HTTP' ),( '8080', '8080', 'HTTP' ),( '8443', '8443', 'TCP' ) ]\n ext_elb_name = get_elb_name( base_name, external_type )\n ext_elb_name = ext_elb_name.replace(\"_\",\"-\")\n int_elb_name = get_elb_name( base_name, app_type )\n int_elb_name = int_elb_name.replace( '_', '-' )\n \n rabbit_ami_id = params.get( 'source-ami' )\n rabbit_ami = None\n \n if not rabbit_ami_id or len( rabbit_ami_id ) < 1 :\n rabbit_ami = get_ami_by_name( ec2_conn, rabbit_ami_name )\n if not rabbit_ami :\n print \"Could not find AMI to install RabbitMQ! \" + rabbit_ami_name\n sys.exit( 2 )\n else :\n rabbit_ami = get_ami_by_id( ec2_conn, rabbit_ami_id )\n if not rabbit_ami :\n print \"Could not find AMI to install RabbitMQ! \" + rabbit_ami_id\n sys.exit( 2 )\n \n print \"Creating RabbitMQ Instances\"\n \n rabbit_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n \n rabbit_sec_grp = find_secgrp(ec2_conn, rabbit_sec_grp_name)\n rabbit_lb_sec_grp = find_secgrp(ec2_conn, rabbit_lb_sec_grp_name)\n nat_sec_grp = find_secgrp(ec2_conn, nat_sec_grp_name)\n \n if not rabbit_sec_grp :\n rabbit_sec_grp = create_secgrp ( ec2_conn, vpc, rabbit_sec_grp_name, \"Allows RabbitMQ LB access to the RabbitMQ\" )\n \n if not rabbit_lb_sec_grp :\n rabbit_lb_sec_grp = create_secgrp ( ec2_conn, vpc, rabbit_lb_sec_grp_name, \"Allows HBO access to RabbitMQ LB\" )\n \n ##\n ## Grant all requires access\n ##\n \n try :\n grant_ssh_access ( ec2_conn, [rabbit_sec_grp], nat_sec_grp )\n except :\n print \"Rule exists\"\n \n try :\n grant_grp_self_access ( ec2_conn, rabbit_sec_grp, 0, 40000, protocol = 'tcp' )\n except :\n print \"Rule exists\"\n \n try :\n grant_grp_access ( ec2_conn, [rabbit_lb_sec_grp], rabbit_sec_grp, 8443, protocol = 'tcp' )\n except :\n print \"Rule exists\"\n \n try : \n grant_grp_access ( ec2_conn, [rabbit_lb_sec_grp], rabbit_sec_grp, 8080, protocol = 'tcp' )\n except :\n print \"Rule exists\"\n \n try :\n grant_ssh_access ( ec2_conn, [rabbit_lb_sec_grp], nat_sec_grp )\n except :\n print \"Rule exists\"\n \n \n rabbit_lb_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n \n rabbit_lb_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 8443,\n to_port = 8443,\n cidr_ip = hbo_cidr_list ) \n \n rabbit_instances_ids = []\n \n rabbit_config = get_rabbit_config(rabbit_subnets)\n \n for subnet in rabbit_subnets :\n cidr_block = subnet.cidr_block\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", rabbit_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n rabbit = launch_instance_vpc( ec2_conn,\n rabbit_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = rabbit_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = rabbit_sec_grp ,\n subnet_id = subnet.id,\n public_ip = False, \n user_data = rabbit_config,\n static_ip_address = instance_private_ip )\n rabbit_instances_ids.append( rabbit.id )\n\n print \"Setting alarms on the rabbit instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, rabbit.id, app_type, base_topicarn, rabbitmq_monitor_rules )\n \n ext_elb = find_elb(elb_conn, ext_elb_name)\n \n if not ext_elb :\n subnets = get_vpc_subnets( vpc_conn, vpc, 'PUBLIC' )\n ext_elb = create_elb ( elb_conn,\n ext_elb_name,\n subnets,\n elb_listeners,\n rabbit_lb_sec_grp,\n \"8080\",\n \"/index.html\",\n True )\n\n int_elb = find_elb( elb_conn, 
int_elb_name )\n if not int_elb :\n subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n int_elb = create_elb ( elb_conn,\n int_elb_name,\n subnets,\n elb_listeners,\n rabbit_lb_sec_grp,\n \"8080\",\n \"/index.html\",\n False )\n\n print \"Adding RabbitMQ instances into the load balancer.\"\n swap_elb_instances( elb_conn,\n ext_elb,\n rabbit_instances_ids,\n swap_smoothly = True,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn )\n swap_elb_instances( elb_conn,\n int_elb,\n rabbit_instances_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn )\n \n print \"Creating Route53 DNS Entries\"\n ext_dns_name = create_dns_name(base_name, \"amqp\")\n print \"Public DNS: \" + ext_dns_name\n set_dns_cname( r53_conn, ext_dns_name, ext_elb.dns_name )\n\n int_dns_name = create_internal_elb_dns_name( base_name, app_name )\n print \"Private DNS: \" + int_dns_name\n set_dns_cname( r53_conn, int_dns_name, int_elb.dns_name )", "def test_disable_ha_for_rpc_queues_by_default(env):\n controllers = env.get_nodes_by_role('controller')\n controller = random.choice(controllers)\n\n # Install tool on one controller and generate messages\n with controller.ssh() as remote:\n # wait when rabbit will be ok after snapshot revert\n wait_for_rabbit_running_nodes(remote, len(controllers))\n resp_pcs = remote.execute('pcs resource show '\n 'p_rabbitmq-server')['stdout']\n assert (\n filter(\n lambda x: 'enable_notifications_ha=true' in x, resp_pcs) != [] and\n filter(\n lambda x: 'enable_notifications_ha=false' not in x, resp_pcs) != []\n ), 'Disabled HA notifications (should be enabled)'\n\n assert (filter(lambda x: 'enable_rpc_ha=false' in x, resp_pcs) != [] and\n filter(lambda x: 'enable_rpc_ha=true' not in x, resp_pcs) != []), (\n 'Enabled HA RPC (should be disabled)')", "def auto_setup(self):\n if self.mot_type == \"xps8p\":\n return\n if self.get_par(\"err_sevr\") == 3:\n print \"Reinitializing motor {}...\".format(self.name)\n self.reinit()\n ok = self.wait_par(\"err_sevr\", 3, match_value=False, timeout=20)\n if ok:\n print \"Successfully reinitialized {}.\".format(self.name)\n time.sleep(0.5)\n else:\n print \"Reinitializing {} timed out. 
Aborting auto_setup.\".format(self.name)\n return\n\n for i in range(3):\n for clear, name in ((self.clear_pu, \"powerup\"),\n (self.clear_stall, \"stall flag\"),\n (self.clear_error, \"error flag\")):\n clear(check=True, wait=False)\n\n ok = []\n for bit, mask in ((RA_POWERUP, 1), (RA_STALL, 1), (RA_ERR, RA_ERR_MASK)):\n ok.append(self._wait_msta_bit(bit, 0, mask, timeout=10))\n if not all(ok):\n print \"Issues with clearing flags for {}\".format(self.name)\n\n try: # Not every environment has pmgr access\n self.pmgr.apply_config(dumb_config=self.name)\n except:\n pass", "def main(argc, argv):\n global NUM_PROCESSES, enableProcLogs, enableBqLogs, HELP, useWeibull\n\n print('Process checkpoint-restart simulator')\n random.seed(RANDOM_SEED) # constant seed for reproducibility\n\n # Create an environment and start the setup process\n env = simpy.Environment()\n parser = ap.ArgumentParser(description=HELP, formatter_class=ap.RawTextHelpFormatter)\n parser.add_argument(\"-p\", \"--proc_logs\", action=\"store_true\", help=\"Show run time logs from processes\")\n parser.add_argument(\"-b\", \"--batchqueue_logs\", action=\"store_true\", help=\"Show run time logs from the batch-queue manager\")\n parser.add_argument(\"-n\", \"--procs\", type=int, default=NUM_PROCESSES, help=\"Max. number of processes to simulate (default: 7)\")\n parser.add_argument(\"-x\", \"--no_preempt\", action=\"store_true\", help=\"Disables preemption of currently executing \"\\\n \"job on failure. This simulates the behavior \"\\\n \"of a simple FIFO queue.\")\n parser.add_argument(\"-w\", \"--use-weibull\", action=\"store_true\", help=\"Use Weibull distribution for failure injection. Default is to use exponential distribution\")\n parser.add_argument(\"-f\", \"--file-name\", type=str, help=\"Store lost work/throughput results in the given file.\")\n parser.add_argument(\"-s\", \"--show-throughput-results\", action=\"store_true\", help=\"Show throughput results using matplotlib.\")\n parser.add_argument(\"-l\", \"--show-lostwork-results\", action=\"store_true\", help=\"Show lost work results using matplotlib.\")\n args = parser.parse_args()\n NUM_PROCESSES = args.procs\n MAX_CIRC_Q_LEN = NUM_PROCESSES + 1\n enableProcLogs = args.proc_logs\n enableBqLogs = args.batchqueue_logs\n useWeibull = args.use_weibull\n\n # Create a batch queue\n mymachine = simpy.Resource(env, MAX_PARALLEL_PROCESSES)\n batchQ = BatchQueue(env, MAX_CIRC_Q_LEN, mymachine, args.no_preempt)\n showPlot = args.show_throughput_results | args.show_lostwork_results\n\n testProcesses = [Process(env, 'Process %d' % i, time_to_checkpoint() + random.randint(0, 5) * 10, mymachine)\n for i in range(NUM_PROCESSES)]\n\n simulateArrivalOfJobs(env, testProcesses, batchQ)\n env.process(batchQ.runBq(False))\n # Execute\n env.run()\n\n # Analyis/results\n print(\"******************************************************\")\n print(\"******************FINAL DATA**************************\")\n print(\"******************************************************\")\n\n res = computeResults(args, batchQ)\n saveResults(args, res)\n showResults(args, res)\n\n print(\"Process #, # Ckpts, # Total Failures, # Restarts, # Failed Restarts, # Failed Ckpts, # Preempts,\"\\\n \" Compute Time, Ckpt Time, Lost Work, Lost Restart Time, Lost Ckpt Time, Submission Time, Start Time,\"\\\n \" End Time, Actual Run Time\")\n for p in testProcesses:\n t1 = int(p.numCkpts * p.ckptTime + p.numRestarts * int(p.ckptTime/2.0) + p.lostWork + p.totalComputeTime + p.lostRestartTime)\n t2 = int(p.actualRunTime)\n 
if not p.restartFailures * p.ckptTime >= p.lostRestartTime:\n print \"Warning\"\n if t1 != t2:\n print(\"Warning: %d != %d\" % (t1, t2))\n print(p)\n if showPlot:\n plt.show()", "def __init__(__self__,\n resource_name: str,\n args: UpgradeRabbitmqArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _setup_tubes(self):\n chan = self.channel\n inp = self.config[self.MODULE_NAME]['amqp']['in']\n out = self.config[self.MODULE_NAME]['amqp']['out']\n if inp['exchange']:\n log.info('generating Input Queue'+ str(inp))\n chan.exchange_declare(**inp)\n self.qname = chan.queue_declare(exclusive=True).queue\n chan.queue_bind(exchange=inp['exchange'],queue=self.qname)\n self.consume = lambda cb : chan.basic_consume(cb,queue=self.qname,no_ack=True)\n self.start_loop = lambda : pika.asyncore_loop()\n\n if out['exchange']:\n log.info('generating Output Exchange'+ str(out))\n chan.exchange_declare(**out)\n self.publish = lambda msg: self.channel.basic_publish(exchange=out['exchange'],routing_key='',body=msg)", "def test_store_recovery(self):\n cluster = HaCluster(self, 1)\n sn = cluster[0].connect().session()\n # Create queue qq, exchange exx and binding between them\n s = sn.sender(\"qq;{create:always,node:{durable:true}}\")\n sk = sn.sender(\"exx/k;{create:always,node:{type:topic, durable:true, x-declare:{type:'direct'}, x-bindings:[{exchange:exx,key:k,queue:qq}]}}\")\n for m in [\"foo\", \"bar\", \"baz\"]: s.send(Message(m, durable=True))\n r = cluster[0].connect().session().receiver(\"qq\")\n self.assertEqual(r.fetch().content, \"foo\")\n r.session.acknowledge()\n # FIXME aconway 2012-09-21: sending this message is an ugly hack to flush\n # the dequeue operation on qq.\n s.send(Message(\"flush\", durable=True))\n\n def verify(broker, x_count):\n sn = broker.connect().session()\n assert_browse(sn, \"qq\", [ \"bar\", \"baz\", \"flush\" ]+ (x_count)*[\"x\"])\n sn.sender(\"exx/k\").send(Message(\"x\", durable=True))\n assert_browse(sn, \"qq\", [ \"bar\", \"baz\", \"flush\" ]+ (x_count+1)*[\"x\"])\n\n verify(cluster[0], 0) # Sanity check\n cluster.bounce(0)\n cluster[0].wait_status(\"active\")\n verify(cluster[0], 1) # Loaded from store\n cluster.start()\n cluster[1].wait_status(\"ready\")\n cluster.kill(0)\n cluster[1].wait_status(\"active\")\n verify(cluster[1], 2)\n cluster.bounce(1, promote_next=False)\n cluster[1].promote()\n cluster[1].wait_status(\"active\")\n verify(cluster[1], 3)", "def acquire_restart(self):\n self.bus.write('ACQ:STATE RUN')", "def on_exchange_declareok(self, unused_frame):\n LOGGER.debug('Exchange declared')\n\n for queue in self._handlers.keys():\n self._channel.queue_declare(self.on_queue_declareok, queue)\n\n RabbitMQ.on_exchange_declareok(self, unused_frame)", "def setup():\n global zb\n # Signal handler (Ctrl+C exit)\n signal.signal(signal.SIGINT, signal_handler) \n # DBus\n session_bus = dbus.SessionBus()\n objXBZB = session_bus.get_object(PROTOCOL_BUS_NAME, PROTOCOL_OBJ_PATH + \"/\" + XBEE_ZB + \"/\" + SOCKET0)\n zb = dbus.Interface(objXBZB, dbus_interface=PROTOCOL_BUS_NAME)", "def cleanup_resources(self, restart=False):", "def HACore():\r\n logging.info('HomeAutomationCore initialized')\r\n threadlist = ThreadList()\r\n sharedqueue = QueueList()\r\n\r\n modules = LoadModulesFromTuple(INSTALLED_APPS)\r\n logging.debug('Loading modules:')\r\n # create threads and so on\r\n for mod in modules:\r\n logging.info(mod)\r\n mt = None\r\n if issubclass(modules[mod].cls, HAWebService): # TODO: too closely coupled\r\n mt = modules[mod].cls(name=mod, 
callback_function=None, queue=sharedqueue, threadlist=threadlist, modules=modules)\r\n elif issubclass(modules[mod].cls, HomeAutomationQueueThread):\r\n mt = modules[mod].cls(name=mod, callback_function=None, queue=sharedqueue, threadlist=threadlist)\r\n elif issubclass(modules[mod].cls, LEDMatrixBase):\r\n pass # leave these to be created within LEDMatrixCore\r\n else: # assume its the level below (no queue)\r\n logging.debug('Instantiating module ' + mod)\r\n mt = modules[mod].cls(name=mod, callback_function=None)\r\n\r\n if mt != None:\r\n if issubclass(modules[mod].cls, HAWebService):\r\n mt.daemon = True\r\n threadlist.append(mt)\r\n\r\n logging.debug('Starting up module threads')\r\n for ti in threadlist:\r\n ti.start() # start all threads at this point\r\n\r\n timecheck = time.time()\r\n while 1:\r\n # main loop that handles queue and threads, and through executing queue item changes the state of the statemachine\r\n try:\r\n for remote_module in REMOTE_APPS:\r\n remote_addr = remote_module['Address']\r\n remote_apps = remote_module['INSTALLED_APPS']\r\n if not 'socketclient' in remote_module.keys():\r\n remote_module['socketclient'] = LEDMatrixSocketClient(remote_addr) #cache\r\n\r\n for item in [i for i in sharedqueue if i.cls in remote_apps]:\r\n logging.info('Sending queue item to remote host: ' + str(remote_module) )\r\n remote_module['socketclient'].SendSerializedQueueItem(item.__str__())\r\n sharedqueue.remove(item)\r\n time.sleep(0.1)\r\n\r\n if time.time() - timecheck > 10:\r\n timecheck = time.time()\r\n logging.info('10s mainloop interval, number of threads: %d (%s), queue items: %d' %\r\n ( len(threadlist), ', '.join([str(i) for i in threadlist]), len(sharedqueue) ) )\r\n for _thread in threadlist:\r\n if not _thread.isAlive():\r\n logging.info('Removing dead thread: ' + _thread.name)\r\n threadlist.remove(_thread)\r\n # TODO: call other module cleanup (e.g. 
remove instance ref in webservice globals)\r\n # webservice_state_instances\r\n # and webservice_class_instances\r\n\r\n except KeyboardInterrupt:\r\n logging.info('Detected ctrl+c, exiting main loop and stopping all threads')\r\n break\r\n except:\r\n logging.critical(\"Unexpected error in main loop (exiting): \" + traceback.format_exc() )\r\n break\r\n\r\n logging.debug('Stopping all threads')\r\n for _thread in threadlist:\r\n _thread.stop_event.set() # telling the threads to stop\r", "def _graceful_restart(self, wait):\n\n self._sut.shutdown(True)\n self._sut.start()\n\n if wait:\n sleep(BespokeGlobals.VM_BOOT_WAIT)", "def pre_upgrade_checks(self):\n\n #HostOverview\n Logger.info(\"******************************************************************************************************************************************************\")\n Logger.info(\"\\t\\t\\t\\t\\t\\t\\tHOST OVERVIEW\")\n Logger.info(\"******************************************************************************************************************************************************\")\n print (\"\\n\")\n Logger.info(\"Ambari version\\t\\t:{0}\".format(self.ambari_version))\n\n #Check OS\n os = platform.dist()\n if os[1] != None:\n Logger.info(\"Operating System\\t\\t:{0} {1} - {2}\".format(os[0],os[1],os[2]))\n else:\n Logger.error(\"Unable to fetch OS details.\")\n self.terminate()\n return\n\n self.check_java_version()\n self.check_exactly_one_current_version()\n\n\n #Check if rack awareness is enabled ?\n rack_awareness = \"SELECT DISTINCT rack_info FROM hosts WHERE rack_info!='/default-rack';\"\n self.cursor.execute(rack_awareness)\n result = self.cursor.fetchone()\n if result is None or len(result) != 1:\n Logger.info(\"Rack Awareness ?\\t\\tNo\\n\")\n else:\n Logger.info(\"Rack Awareness ?\\t\\tYes\\n\")\n\n #Security Overview\n self.check_security()\n\n #Check High Availability configuration\n self.check_high_availability()\n\n #Check Metastores\n self.check_metastore()", "def _auto_spark(self):\n self.auto_queue.clean_up(emphasis=self.emphasis[\"redundant\"])\n if not self.auto_queue.is_busy and not self.clear_queue.is_busy:\n self.auto_queue.is_busy = True\n self.process(self.auto_queue)\n\n self.hyper_queue.clean_up(emphasis=self.emphasis[\"redundant\"])\n if not self.hyper_queue.is_busy and not self.clear_queue.is_busy:\n self.hyper_queue.is_busy = True\n self.process(self.hyper_queue)", "def on_exchange_declareok(self, _unused_frame):\n self._channel_ctrl.queue_declare(\n '',\n exclusive=True,\n auto_delete=True,\n callback=self.on_queue_declareok\n )", "def test_ipcrm_queues_not_isntalled(): # pragma: windows\n IPCComm.ipcrm_queues()", "def setup():\n global RBD_POOL, RBD_STORAGE_CLASS, RBD_SECRET, CEPHFS_OBJ, \\\n CEPHFS_STORAGE_CLASS, CEPHFS_SECRET, RBD_PVC, CEPHFS_PVC\n log.info(\"Creating RBD Pool\")\n RBD_POOL = helpers.create_ceph_block_pool()\n\n log.info(\"Creating RBD Secret\")\n RBD_SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)\n\n log.info(\"Creating RBD StorageClass\")\n RBD_STORAGE_CLASS = helpers.create_storage_class(\n constants.CEPHBLOCKPOOL, RBD_POOL.name, RBD_SECRET.name\n )\n\n log.info(\"Creating CephFilesystem\")\n CEPHFS_OBJ = helpers.create_cephfilesystem()\n\n log.info(\"Creating FS Secret\")\n CEPHFS_SECRET = helpers.create_secret(constants.CEPHFILESYSTEM)\n\n log.info(\"Creating FS StorageClass\")\n CEPHFS_STORAGE_CLASS = helpers.create_storage_class(\n constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),\n CEPHFS_SECRET.name\n )\n\n log.info(\"Creating RBC 
PVC\")\n RBD_PVC = helpers.create_pvc(sc_name=RBD_STORAGE_CLASS.name)\n\n log.info(\"Creating CephFs PVC\")\n CEPHFS_PVC = helpers.create_pvc(sc_name=CEPHFS_STORAGE_CLASS.name)", "def _pre_deploy_legacy_ltm_cleanup(self):\n\n # Detect legacy names (nodes do not include the route domain)\n self._bigip.refresh_ltm()\n existing_nodes = self._bigip.get_nodes()\n node_list = list(existing_nodes.keys())\n for node_name in node_list:\n route_domain = split_ip_with_route_domain(node_name)[1]\n if route_domain is None:\n break\n else:\n return\n\n existing_iapps = self._bigip.get_app_svcs()\n existing_virtuals = self._bigip.get_virtuals()\n existing_policies = self._bigip.get_l7policies()\n existing_irules = self._bigip.get_irules()\n existing_internal_data_groups = self._bigip.get_internal_data_groups()\n existing_pools = self._bigip.get_pools()\n\n delete_iapps = self._get_resource_tasks(existing_iapps, {})[2]\n delete_virtuals = self._get_resource_tasks(existing_virtuals, {})[2]\n delete_policies = self._get_resource_tasks(existing_policies, {})[2]\n delete_irules = self._get_resource_tasks(existing_irules, {})[2]\n delete_internal_data_groups = self._get_resource_tasks(\n existing_internal_data_groups, {})[2]\n delete_pools = self._get_resource_tasks(existing_pools, {})[2]\n delete_monitors = self._get_monitor_tasks({})[2]\n delete_nodes = self._get_resource_tasks(existing_nodes, {})[2]\n\n delete_tasks = delete_iapps + delete_virtuals + delete_policies + \\\n delete_irules + delete_internal_data_groups + delete_pools + \\\n delete_monitors + delete_nodes\n taskq_len = len(delete_tasks)\n\n finished = False\n LOGGER.debug(\"Removing legacy resources...\")\n while not finished:\n LOGGER.debug(\"Legacy cleanup service task queue length: %d\",\n taskq_len)\n\n # Must remove all resources that depend on nodes (vs, pools, ???)\n delete_tasks = self._delete_resources(delete_tasks)\n\n tasks_remaining = len(delete_tasks)\n\n # Did the task queue shrink?\n if tasks_remaining >= taskq_len or tasks_remaining == 0:\n # No, we have stopped making progress.\n finished = True\n\n # Reset the taskq length.\n taskq_len = tasks_remaining", "def setup_error_queue(self):\n logger.info('Declaring error queue %s', self.error_queue_name)\n\n self._channel.queue_declare(self.on_error_queue_declareok,queue=self.error_queue_name, durable=True, exclusive=False)", "def setup_amq_kafka_bridge(self):\n try:\n kafka_bridge = templating.load_yaml(os.path.join(self.dir, self.amq_kafka_bridge_yaml))\n self.kafka_bridge = OCS(**kafka_bridge)\n self.kafka_bridge.create()\n except(CommandFailed, CalledProcessError) as cf:\n log.error('Failed during setup of AMQ KafkaConnect')\n raise cf\n # Making sure the kafka_bridge is running\n if self.is_amq_pod_running(pod_pattern=\"my-bridge-bridge\"):\n return self.kafka_bridge\n else:\n raise ResourceWrongStatusException(\"kafka_bridge_pod pod is not getting to running state\")" ]
[ "0.5795612", "0.55824107", "0.54487985", "0.54377425", "0.52643657", "0.5207759", "0.51595104", "0.51521397", "0.5136054", "0.5055962", "0.5046804", "0.49983096", "0.49463856", "0.49236155", "0.48859736", "0.48449838", "0.47945452", "0.477098", "0.47691527", "0.4756401", "0.47506055", "0.47398916", "0.47261605", "0.4703167", "0.4693551", "0.46884346", "0.4661809", "0.46377915", "0.46360162", "0.46308935" ]
0.6884117
0
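The `_list_queues`/`_list_exchanges`/`purge_queue` calls above are pyon container internals; the final "empty service queues" step can be sketched against plain pika. The host and queue names here are assumptions, not part of the entry:

    # Sketch of the purge step: inspect each queue passively, refuse to
    # purge when consumers are attached unless force=True.
    import pika

    def purge_idle_queues(queue_names, force=False, host='localhost'):
        conn = pika.BlockingConnection(pika.ConnectionParameters(host))
        channel = conn.channel()
        for name in queue_names:
            ok = channel.queue_declare(queue=name, passive=True)  # inspect only
            if ok.method.consumer_count > 0 and not force:
                print("refusing to purge %s: %d consumers attached" %
                      (name, ok.method.consumer_count))
            else:
                channel.queue_purge(queue=name)
                print("purged %s of %d messages" % (name, ok.method.message_count))
        conn.close()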
Adds a persistence interval as a child of this one, along with its points
def appendChild(self, child): self.points += child.points.copy() self.children.append(child)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append(self, interval):\n self.intervals.append(copy.deepcopy(interval))", "def add_child(self, child: UIComponent):\n child.parent = self\n child.set_chronometer(self._chronometer)\n self.children.append(child)\n if self.props.resize_mode == ResizeMode.AUTO:\n self._reset('add_child')", "def __iadd__(self, point):\n self.points.append(point)\n return self", "def append(self,x):\n if x is None:\n self.last = None\n elif self.last is None:\n self.last = x\n self.dlast = 0\n else:\n self.dlast = x-self.last\n LiveStat.append(self,float(self.dlast))\n self.last = x", "def add_child(self, child):\r\n self.children.append(child)", "def commit(self):\n amplitude, pos, left, right = self.__amplitude, self.__pos, self.__left, self.__right\n data = self.__data\n start = left / data.shape[-1]\n end = right / data.shape[-1]\n zlp_interval = self.computation.get_result(\"zlp_interval\", None)\n if not zlp_interval:\n zlp_interval = self.__src.add_interval_region(start, end)\n self.computation.set_result(\"zlp_interval\", zlp_interval)\n zlp_interval.interval = start, end\n zlp_interval.graphic_id = \"zlp_interval\"\n zlp_interval._graphic.color = \"#0F0\"", "def _assign_interval(self, interval):\n # Assigning the new value to the interval attribute of the object\n self._interval = interval", "def insert(self, interval):\n\t\tif not self.head and not self.tail:\n\t\t\tself.head = interval\n\t\t\tself.tail = interval\n\t\telif interval.start <= (self.tail.end + 1):\n\t\t\tself.tail.end = interval.end\n\t\telse:\n\t\t\tself.tail.next = interval\n\t\t\tself.tail = interval", "def __init__(self):\n self.__intervals__ = []", "def add_child(self, name, actual, desired):\n self.children.append(\n self.Children(name, actual, desired, actual * self.value,\n (desired - actual) * self.value))", "def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)", "def addPoints(self, points):\r\n self.points = points", "def __finalize_if_needed(self) -> None:\n penultimate_measure_end = N_EIGHTHS_PER_MEASURE * (self.n_measures - 1)\n if self.current_time_in_eighths < penultimate_measure_end:\n return\n end_line_element = LineElement(\n self.end_scale_element,\n penultimate_measure_end,\n self.total_duration_in_eighths\n )\n self.counterpoint.append(end_line_element)\n self.__add_to_piano_roll(end_line_element)\n last_movement = (\n self.end_scale_element.position_in_degrees\n - self.counterpoint[-2].scale_element.position_in_degrees\n )\n self.past_movements.append(last_movement)\n self.current_time_in_eighths = self.total_duration_in_eighths", "def add_child(self, p, elem):\n node = self._validate(p)\n child = self._Node(elem, idx=self._curr_idx, parent=node)\n self._curr_idx += 1\n node._children.append(child)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(child)", "def add_child(self, child):\r\n self.children.append(child)", "def store(self):\n store_moments = self.steps_performed % self.meas_every[0] == 0\n store_coords = self.steps_performed % self.meas_every[1] == 0\n if not (store_moments or store_coords):\n return\n Xp = np.copy(self.bunch.X[:, [1, 3]])\n self.kick(+0.5 * self.ds) # sync positions/slopes\n if store_moments:\n self.history.store_moments(self.s)\n if store_coords:\n self.history.store_coords(self.s)\n self.bunch.X[:, [1, 3]] = Xp", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def 
InsertNextPoint(self, ):\n ...", "def mark_add(self,duracion):\n mark = MarcaDeTiempo(duracion)\n self.tiempos.insert(self.tiempos.posicion_actual(),mark)\n self.tiempos.actualizar()", "def add_point(self, pt):\n self.points.append(pt)", "def add_child(self, m, p):\n\t\tn = Node(move=m, parent=self, player_just_moved=p)\n\t\tself.child_nodes.append(n)\n\t\treturn n", "def __init__(self,interval):\n _interval = interval", "def persist(\n self,\n delay_seconds: int,\n *args: Any,\n **kwargs: Any\n ) -> ScheduledEvent:\n\n event = self.add(\n delay_seconds,\n *args,\n **kwargs\n )\n\n event.persist()\n\n return event", "def add(self, point):\n self.points.append(point)", "def append(self, point):\n self.points.append(point)\n return self", "def grow(self, start_period=1, cascade=True):\n end_period = start_period + 1 if not cascade else self.parent.horizon\n for p in range(start_period, end_period):\n self.reset_areas(p+1) #, self._areas[p], self._areas[p+1] # WTF?\n #for age, area in list(self._areas[p].items()): self._areas[p+1][age+1] = area\n for age, area in list(self._areas[p].items()): self._areas[p+1][age+self.parent.period_length] = area", "def set_timepoints(self):\n unixtime = self.created.timestamp() # float\n self.timepoints = unixtime + self.points # TODO: calc a sort value!", "def add_child(self, chromosome):\n self.next_population.append(to_chromosome(chromosome))", "def add_pose(self):\n base_pose = PoseStamped()\n try:\n # Convert pose to base frame\n self.data['poses']['marker'].header.stamp = \\\n self.tfl.getLatestCommonTime(self.params['world'],\n self.data['poses']\n ['marker'].header.frame_id)\n base_pose = self.tfl.transformPose(self.params['world'],\n self.data['poses']['marker'])\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" +\n self.data['poses']['marker'].header.frame_id)\n\n self.data['poses']['path'].header.frame_id = self.params['world']\n self.data['poses']['path'].header.stamp = Time.now()\n self.data['poses']['path'].poses.append(deepcopy(base_pose.pose))\n\n Me.info_message(self.data['poses']['path'])\n return", "def save(self, *args, **kwargs):\n self.small_particles = (int(self.dylos_bin_1) +\n int(self.dylos_bin_2) + int(self.dylos_bin_3))\n self.big_particles = int(self.dylos_bin_4)\n super(Dylos, self).save(*args, **kwargs)" ]
[ "0.5290929", "0.52899754", "0.5150174", "0.5115925", "0.50472325", "0.49198186", "0.48833942", "0.4872159", "0.48698753", "0.4853984", "0.48440596", "0.48227778", "0.4820813", "0.48132288", "0.4789898", "0.47870472", "0.47661018", "0.47631806", "0.47591513", "0.47563797", "0.47560063", "0.4749803", "0.47350183", "0.47344863", "0.47319072", "0.47247738", "0.47087577", "0.47075808", "0.46950102", "0.46949887" ]
0.5965832
0
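The `PersistenceInterval` class this method belongs to is not shown; a minimal version consistent with the `points`/`children` usage here and with the filtration entry below might be:

    # Hypothetical reconstruction: the real class likely carries more state.
    class PersistenceInterval(object):
        def __init__(self, birth_index, birth, death=None):
            self.birth_index = birth_index
            self.birth = birth        # function value at which the component appears
            self.death = death        # value at which it merges (or infinity)
            self.points = [birth_index]
            self.children = []

        def appendPoint(self, i):
            self.points.append(i)

        def appendChild(self, child):
            self.points += child.points[:]   # copy, matching .copy() above
            self.children.append(child)

        def getRelevance(self):
            # persistence of the interval; infinite if it never dies
            return (self.death - self.birth) if self.death is not None else float('inf')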
Computes the filtration of the function whose values are stored in x. Returns a single persistence interval which is the parent of all the others.
def get_filtration(self, x): n = x.shape[0] s = sorted([(i, x[i]) for i in range(n)], key=lambda x: x[1]) selected = [False for i in range(n)] sets = {} ancestor = {i: i for i in range(n)} i = 0 while False in selected: newpoint = s[i] j = s[i][0] val = s[i][1] selected[j] = True if j == 0 and selected[1]: ancestor[0] = ancestor[1] sets[ancestor[1]].appendPoint(0) elif j == 0: sets[0] = PersistenceInterval(0, val) elif j == n - 1 and selected[n - 2]: ancestor[n - 1] = ancestor[n - 2] sets[ancestor[n - 2]].appendPoint(n -1) elif j == n - 1: sets[n - 1] = PersistenceInterval(n - 1, val) elif selected[j - 1] and selected[j + 1]: i_a = ancestor[j - 1] i_b = ancestor[j + 1] a = x[i_a] b = x[i_b] if a < b: ancestor[j] = i_a for key in range(n): if ancestor[key] == i_b: ancestor[key] = i_a sets[i_b].death = val sets[i_b].appendPoint(j) sets[i_a].appendChild(sets[i_b]) sets[i_a].appendPoint(j) else: ancestor[j] = i_b for key in range(n): if ancestor[key] == i_a: ancestor[key] = i_b sets[i_a].death = val sets[i_a].appendPoint(j) sets[i_b].appendChild(sets[i_a]) sets[i_b].appendPoint(j) elif selected[j - 1]: ancestor[j] = ancestor[j - 1] sets[ancestor[j - 1]].appendPoint(j) elif selected[j + 1]: ancestor[j] = ancestor[j + 1] sets[ancestor[j + 1]].appendPoint(j) else: sets[j] = PersistenceInterval(j, val) i += 1 sets[s[0][0]].death = self.infty setList = sorted([sets[i] for i in sets.keys()], key=lambda x:x.getRelevance(), reverse=True) self.sets = setList return setList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_filter(self, x):\n raise NotImplementedError", "def __call__(self,x):\n\n arr = np.array(x,copy=False,dtype=float)\n return self._filterfunc(arr,*self.parvals)", "def x_density_function(self, x):\n return self.wavefunction(x) * self.wavefunction(x)", "def apply(cls, x):\n return np.maximum(0, np.minimum(1, 0.2*x + 0.5))", "def __call__(self,x):\n arr = np.array(x,copy=False,dtype=float)\n res = self._filterfunc(arr.ravel(),*self.parvals)\n return res.reshape(arr.shape)", "def input_function(given_x):\n\tkeys = signal_Assignment.keys()\n\tkeys.sort\n\n\tfor i in range(len(signal_Assignment)):\n\t\tif given_x > keys[-1]:\n\t\t\treturn signal_Assignment[keys[-1]]\n\t\telif given_x > keys[i] and given_x < keys[i+1]:\n\t\t\tif given_x < (keys[i]+keys[i+1])/2:\n\t\t\t\treturn signal_Assignment[keys[i]]\n\t\t\telse:\n\t\t\t\treturn signal_Assignment[keys[i+1]]\n\t\telif given_x == keys[i]:\n\t\t\treturn signal_Assignment[keys[i]]", "def fn(x):\n ans = x\n for xx in graph.get(x, []): \n if quiet[fn(xx)] < quiet[ans]: ans = fn(xx)\n return ans", "def fluence(self, range_x):\n time_step = np.diff(range_x)\n val_square = np.square(np.abs(self(range_x)))\n res = np.sum(np.array(val_square[:-1] * time_step))\n res = res/(range_x[-1] - range_x[0])\n return res", "def fitness(self, x):\n u = np.asarray([x[0]])\n C = self.C_func(u)\n P = self.P\n return np.asarray([np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))])", "def f(x):\n\treturn (sc.log(x**2+5)*sc.cos(0.8*x)+3.5*x)/(sc.e**(x/10))", "def _call(self, x):\n if self.prior is None:\n tmp = (1 - x + scipy.special.xlogy(x, x)).inner(self.domain.one())\n else:\n tmp = ((self.prior - x + scipy.special.xlogy(x, x / self.prior))\n .inner(self.domain.one()))\n if np.isnan(tmp):\n # In this case, some element was less than or equal to zero\n return np.inf\n else:\n return tmp", "def ppf(self,x):\n if x > 1.0 or x < 0:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate ppf for', str(x), '! 
Valid value should within [0,1]!')\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == 1.0:\n return float(sortedMapping[-1][0]) if self.isFloat else sortedMapping[-1][0]\n else:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if cumulative >= x:\n return float(element[0]) if self.isFloat else element[0]", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5", "def single_variable_cut(x, xmin, xmax, \n inclusion_convention=(False, False)):\n\n if inclusion_convention==(True, True):\n return (x >= xmin) & (x <= xmax)\n elif inclusion_convention==(True, False):\n return (x >= xmin) & (x < xmax)\n elif inclusion_convention==(False, True):\n return (x > xmin) & (x <= xmax)\n else:\n return (x > xmin) & (x < xmax)", "def f(self, x: np.array) -> np.array:\n return (1/np.sqrt(2*np.pi*self.sig**2))*np.exp(-1*((x - self.mu)**2/(2*self.sig**2)))", "def conditional_aitken(f, x):\n condition = True\n x = x.copy()\n gg = x\n np_abs = np.abs\n while condition:\n try:\n g, condition = f(x)\n except:\n x = gg.copy()\n g, condition = f(x)\n if not condition: return g\n gg, condition = f(g)\n dxg = x - g\n dummy = gg + dxg - g\n mask = np_abs(dummy) > 1e-16\n x[mask] -= dxg[mask]**2/dummy[mask]", "def _call(self, x):\n if self.prior is None:\n tmp = -1.0 * (np.log(1 - x)).inner(self.domain.one())\n else:\n tmp = (-self.prior * np.log(1 - x)).inner(self.domain.one())\n if np.isnan(tmp):\n # In this case, some element was larger than or equal to one\n return np.inf\n else:\n return tmp", "def discount(x, gamma):\n\n return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]", "def discount(x, gamma):\n return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]", "def _call(self, x):\n if functional.prior is None:\n return (-1.0) / x + 1\n else:\n return (-functional.prior) / x + 1", "def fn(x):\n ans = rsm = ii = 0 \n for i in range(len(nums)): \n rsm += nums[i]\n while rsm > x: # sliding window \n rsm -= nums[ii]\n ii += 1\n ans += i - ii + 1\n return ans", "def filter_detect(self, x):\n b, a = self.c_detect\n return filtfilt(b, a, x)", "def filter_denoise(self, x):\n b, a = self.c_notch\n return filtfilt(b, a, x)", "def _call(self, x):\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)", "def _call(self, x):\n if self.prior is None:\n tmp = ((x - 1 - np.log(x)).inner(self.domain.one()))\n else:\n # This is the old line from odl version 0.6.0.\n # tmp = ((x - self.prior + self.prior * np.log(self.prior / x))\n tmp = ((x - self.prior + self.prior * np.log((self.prior + 1e-12) / x))\n .inner(self.domain.one()))\n if np.isnan(tmp):\n # In this case, some element was less than or equal to zero\n return np.inf\n else:\n return tmp", "def apply(cls, x):\n return np.maximum(-1, np.minimum(1, x))", "def steff(f, x: float):\n print(x)\n if g(f, x)(x) != 0:\n yield x - f(x) / g(f, x)(x) # First give x_n + 1\n yield from steff(f, x - f(x) / g(f, x)(x)) # Then give new iterator", "def g(f, x: float):\n return lambda x: f(x + f(x)) / f(x) - 1", "def gaussian_filter(x):\n return _gaussian_filter(x, 3)" ]
[ "0.66155213", "0.6502836", "0.6244583", "0.6191562", "0.6171329", "0.60968804", "0.5972131", "0.58754563", "0.58578396", "0.583398", "0.58136237", "0.5768855", "0.57474226", "0.57474226", "0.5723264", "0.5715423", "0.57015836", "0.568552", "0.5681059", "0.56740314", "0.5636921", "0.5617656", "0.5615879", "0.5605152", "0.5602691", "0.56019914", "0.55883104", "0.5585507", "0.55704707", "0.55636615" ]
0.73229456
0
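A toy run of the sweep, assuming `filt` is an object exposing `get_filtration` with `infty` set: the component born at the global minimum absorbs the shallower one when the separating peak is reached (the elder rule):

    # Two local minima (indices 1 and 3); they merge at the peak value 5.0.
    import numpy as np

    x = np.array([4.0, 1.0, 5.0, 2.0, 3.0])
    intervals = filt.get_filtration(x)   # filt: hypothetical holder object
    for itv in intervals:
        print(itv.birth, itv.death)
    # -> 1.0 inf   (global minimum's component: the parent interval)
    #    2.0 5.0   (second minimum, killed at the separating peak)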
Testing {% ageid %} with now
def test_with_now(self): self.assertEqual(ageid(self.now), 'age1')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_with_now_minus_4_days(self):\n self.assertEqual(ageid(self.now - timedelta(4)), 'age5')", "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_with_now_minus_3_days(self):\n self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_with_non_datetime(self):\n class Foo:\n def __init__(self, now):\n self.day = now.day\n self.month = now.month\n self.year = now.year\n\n self.assertEqual(ageid(Foo(self.now)), 'age1')", "def age(min=1, max=99):\r\n\r\n return '%.i' % ((_random.randint(min, max + 1) if min\r\n and max else _random.randint(1, 100)))", "def details():\n now_dt = dt.datetime.now()\n return render_template(\n 'resume/home.html',\n age=relativedelta(now_dt, dt.datetime(day=19, month=3, year=1983)).years,\n current_year=now_dt.year,\n )", "def age():\n return 1", "def age():\n return 1", "def age(self):\n return datetime.now().year - self.birth_day.year", "def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")", "def age(self):\n self._age += 1", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping '.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def age(self):\n today = date.today()\n birthday = datetime.strptime(self.birt['date'], \"%d %b %Y\")\n return birthday", "def _get_age(self):\n return self.__age", "def age(self, age):\n\n self._age = age", "def age(self, age):\n\n self._age = age", "def testClinicalPatientAge(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"age\")\n\n self.util.intPropertyTest(self, attr, \"age\")", "def get_age(self):\r\n return self.age", "def get_age(self):\n today = datetime.now()\n return today.year \\\n - self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))", "def _perAgeChoiceSelector(self, params):\n\n entity = params['entity']\n\n birth_date = entity.birth_date\n today = params.get('today', date.today())\n\n days = today - birth_date\n days -= timedelta(days=calendar.leapdays(birth_date.year, today.year))\n if calendar.isleap(today.year) and today.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n if calendar.isleap(birth_date.year) 
and birth_date.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n\n return str(days.days / 365)", "def getAge(self):\r\n return self.age", "def get_age(self):\n\t\treturn self.age", "def test_person_loads_and_renders(self):\n response = self.client.get(reverse('main'))\n request_data = RequestData.objects.latest('pub_date')\n try:\n template = '{% load edit_link %}{% edit_link object %}'\n context = {'object': request_data}\n rendered = Template(template).render(Context(context))\n except:\n self.fail(\"raised exception while template rendering\")\n self.assertEquals(rendered, '<a href=\"/admin/hello/requestdata/%s/\">(admin)</a>' % str(request_data.pk))", "def age(self):\r\n return self._age", "def is_vintage(self):\n return self.get_age()>=AGE", "def age(self):\n # TODO(user) move up to AFF4Object after some analysis of how .age is\n # used in the codebase.\n aff4_type = self.Get(self.Schema.TYPE)\n\n if aff4_type:\n return aff4_type.age\n else:\n # If there is no type attribute yet, we have only just been created and\n # not flushed yet, so just set timestamp to now.\n return rdfvalue.RDFDatetime().Now()", "def test_template_matcher(self):\n from srsly.ruamel_yaml.serializer import templated_id\n\n assert templated_id(u\"id001\")\n assert templated_id(u\"id999\")\n assert templated_id(u\"id1000\")\n assert templated_id(u\"id0001\")\n assert templated_id(u\"id0000\")\n assert not templated_id(u\"id02\")\n assert not templated_id(u\"id000\")\n assert not templated_id(u\"x000\")", "def age(self):\n return self.__age", "def happy_birthday(name, age: hug.types.number = 1):\n return \"Happy {age} Birthday {name}!\".format(**locals())" ]
[ "0.69662714", "0.6784399", "0.66615933", "0.6583668", "0.5942472", "0.5604099", "0.5554456", "0.54843175", "0.54843175", "0.54263604", "0.53355616", "0.51519006", "0.51454365", "0.5122762", "0.51186895", "0.5117891", "0.5117891", "0.50775474", "0.50633246", "0.5044899", "0.5044541", "0.5042304", "0.5035039", "0.5030023", "0.50212127", "0.5005602", "0.50007623", "0.4992749", "0.49607652", "0.49483228" ]
0.735791
0
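These tests (and the day-offset variants in the following entries) pin down the tag's contract: a timestamp from today maps to 'age1' and each extra day of age bumps the suffix. A plausible implementation — the clamp at 'age5' is an assumption consistent with the minus-4-days test, and the attribute access mirrors the Foo duck-typing test among the negatives:

    # Plausible ageid template helper implied by the test suite.
    from datetime import date

    def ageid(timestamp):
        if timestamp is None:
            return ''
        # works for datetimes and for any object exposing year/month/day
        then = date(timestamp.year, timestamp.month, timestamp.day)
        days = (date.today() - then).days
        return 'age%d' % min(days + 1, 5)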
Testing {% ageid %} with yesterday
def test_with_now_minus_1_day(self): self.assertEqual(ageid(self.now - timedelta(1)), 'age2')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_with_now_minus_4_days(self):\n self.assertEqual(ageid(self.now - timedelta(4)), 'age5')", "def test_with_now_minus_3_days(self):\n self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_with_now(self):\n self.assertEqual(ageid(self.now), 'age1')", "def age(self):\n return datetime.now().year - self.birth_day.year", "def test_with_non_datetime(self):\n class Foo:\n def __init__(self, now):\n self.day = now.day\n self.month = now.month\n self.year = now.year\n\n self.assertEqual(ageid(Foo(self.now)), 'age1')", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def age_diff(self, other):\n diff = self.age - other.age\n print(abs(diff), \"year difference\")", "def test_date_accept_yesterday(self):\n import dateutil.relativedelta\n spi_search = \"find date yesterday\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()+dateutil.relativedelta.relativedelta(days=-1), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")", "def details():\n now_dt = dt.datetime.now()\n return render_template(\n 'resume/home.html',\n age=relativedelta(now_dt, dt.datetime(day=19, month=3, year=1983)).years,\n current_year=now_dt.year,\n )", "def age(self):\n today = date.today()\n birthday = datetime.strptime(self.birt['date'], \"%d %b %Y\")\n return birthday", "def age(self):\n today = datetime.date.today()\n\n return today.year - int(self.birthday[0:4])", "def _perAgeChoiceSelector(self, params):\n\n entity = params['entity']\n\n birth_date = entity.birth_date\n today = params.get('today', date.today())\n\n days = today - birth_date\n days -= timedelta(days=calendar.leapdays(birth_date.year, today.year))\n if calendar.isleap(today.year) and today.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n if calendar.isleap(birth_date.year) and birth_date.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n\n return str(days.days / 365)", "def age(self):\n self._age += 1", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_age(self):\n return CURRENT_YEAR - self.year", "def age(self):\n delta = now() - self.creation\n return delta.seconds", "def is_vintage(self):\n return self.get_age()>=AGE", "def age():\n return 1", "def age():\n return 1", "def _get_age(self):\n for employee in self:\n if employee.sudo().birthday:\n employee.age = relativedelta(\n fields.Date.from_string(fields.Date.today()),\n fields.Date.from_string(employee.sudo().birthday)).years\n else:\n employee.age = 0", "def yesterday():\n return datetime.today() - timedelta(1)", "def get_age(self):\n today = datetime.now()\n return today.year \\\n - 
self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))", "def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6", "def age(self, age):\n\n self._age = age", "def age(self, age):\n\n self._age = age", "def get_age(self):\n age = CURRENT_YEAR - self.year\n return age", "def get_age(self):\n return Guitar.CURRENT_YEAR - self.year", "def _set_age(cls, data):\n birth = data.get(\"birth\")\n if birth:\n today = datetime.now().date()\n data[\"age\"] = relativedelta(today, birth).years\n return data" ]
[ "0.6852591", "0.68312496", "0.6746986", "0.63439506", "0.55546343", "0.5394519", "0.5380408", "0.53524274", "0.532431", "0.53095055", "0.5292067", "0.5264989", "0.52530444", "0.5232551", "0.5178952", "0.5160872", "0.5160872", "0.51382077", "0.5128389", "0.51267356", "0.51267356", "0.5124323", "0.5105932", "0.5101296", "0.5100289", "0.50908893", "0.50908893", "0.508964", "0.5070814", "0.50631166" ]
0.71235526
0
Testing {% ageid %} with two days ago
def test_with_now_minus_2_days(self): self.assertEqual(ageid(self.now - timedelta(2)), 'age3')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_with_now_minus_3_days(self):\n self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_with_now_minus_4_days(self):\n self.assertEqual(ageid(self.now - timedelta(4)), 'age5')", "def test_with_now(self):\n self.assertEqual(ageid(self.now), 'age1')", "def age_diff(self, other):\n diff = self.age - other.age\n print(abs(diff), \"year difference\")", "def age(self):\n return datetime.now().year - self.birth_day.year", "def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def age(birthdate):\n today=date.today()\n birthdate=date(birthdate[2],birthdate[1],birthdate[0])\n if birthdate>today:\n return \"Person has not been born yet!\"\n difference=today-birthdate\n return difference.days", "def get_age(self):\n today = datetime.now()\n return today.year \\\n - self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))", "def age(self):\n delta = now() - self.creation\n return delta.seconds", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping '.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def age(self):\n today = datetime.date.today()\n\n return today.year - int(self.birthday[0:4])", "def _age_on_date(bday, exp_date):\n if exp_date < bday:\n raise ValueError(\"The experimentation date must be after the birth \"\n \"date\")\n if exp_date.month > bday.month:\n return exp_date.year - bday.year\n elif exp_date.month == bday.month:\n if exp_date.day >= bday.day:\n return exp_date.year - bday.year\n return exp_date.year - bday.year - 1", "def _perAgeChoiceSelector(self, params):\n\n entity = params['entity']\n\n 
birth_date = entity.birth_date\n today = params.get('today', date.today())\n\n days = today - birth_date\n days -= timedelta(days=calendar.leapdays(birth_date.year, today.year))\n if calendar.isleap(today.year) and today.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n if calendar.isleap(birth_date.year) and birth_date.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n\n return str(days.days / 365)", "def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6", "def age(self):\n today = date.today()\n birthday = datetime.strptime(self.birt['date'], \"%d %b %Y\")\n return birthday", "def calculate_current_age(dob):\n today = datetime.date.today()\n years = today.year - dob.year\n if today.month < dob.month or (today.month == dob.month and today.day < dob.day):\n years -= 1\n return years", "def is_vintage(self):\n return self.get_age()>=AGE", "def num_older_than(age: float) -> int:\r\n cur = con.cursor()\r\n count_older = cur.execute(\r\n \"\"\"SELECT COUNT(Patient_ID)\r\n FROM Patient\r\n WHERE (JULIANDAY('now') - JULIANDAY(Date_Of_Birth)) / ? > ?\"\"\",\r\n [DAYS_IN_YEAR, age],\r\n ).fetchall()\r\n\r\n return count_older[0][0]", "def get_age(date):\n today = datetime.date.today()\n return today.year - date.year - ((today.month, today.day) < (date.month, date.day))", "def get_age(self, name=None):\n now = datetime.now()\n delta = relativedelta(now, self.date_of_birth)\n years_months_days = str(delta.years) + 'y ' + str(delta.months) + \\\n 'm ' + str(delta.days) + 'd'\n return years_months_days", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_age(self):\n born = self.birth_date\n if not born:\n return 0\n today = fields.Date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))", "def test_with_non_datetime(self):\n class Foo:\n def __init__(self, now):\n self.day = now.day\n self.month = now.month\n self.year = now.year\n\n self.assertEqual(ageid(Foo(self.now)), 'age1')", "def get_age(self):\n age = CURRENT_YEAR - self.year\n return age", "def calculate_age(born):\n today = datetime.date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))", "def is_old(self):\n return self.age > self.lifespan", "def age(self, agent):\n return (self.time - agent.born)/52.0" ]
[ "0.7187669", "0.71045613", "0.69863856", "0.6409827", "0.6115783", "0.6011794", "0.59129065", "0.58910406", "0.5766613", "0.57489073", "0.570259", "0.5692219", "0.56637126", "0.5635098", "0.56275165", "0.5569759", "0.5563165", "0.5558488", "0.55378413", "0.5536425", "0.5511162", "0.5501853", "0.5495523", "0.5495523", "0.5488229", "0.54630786", "0.54618174", "0.5461429", "0.5458604", "0.5442617" ]
0.74781424
0
Testing {% ageid %} with three days ago
def test_with_now_minus_3_days(self): self.assertEqual(ageid(self.now - timedelta(3)), 'age4')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_with_now_minus_4_days(self):\n self.assertEqual(ageid(self.now - timedelta(4)), 'age5')", "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_with_now(self):\n self.assertEqual(ageid(self.now), 'age1')", "def age(self):\n return datetime.now().year - self.birth_day.year", "def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")", "def thirty_days_ago():\n return date.today() - timedelta(days=30)", "def age_diff(self, other):\n diff = self.age - other.age\n print(abs(diff), \"year difference\")", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def age(self):\n delta = now() - self.creation\n return delta.seconds", "def age(birthdate):\n today=date.today()\n birthdate=date(birthdate[2],birthdate[1],birthdate[0])\n if birthdate>today:\n return \"Person has not been born yet!\"\n difference=today-birthdate\n return difference.days", "def age(self):\n today = datetime.date.today()\n\n return today.year - int(self.birthday[0:4])", "def get_age(self):\n today = datetime.now()\n return today.year \\\n - self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping '.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def _perAgeChoiceSelector(self, params):\n\n entity = params['entity']\n\n birth_date = entity.birth_date\n today = params.get('today', date.today())\n\n days = today - birth_date\n days -= timedelta(days=calendar.leapdays(birth_date.year, today.year))\n if calendar.isleap(today.year) and today.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n if 
calendar.isleap(birth_date.year) and birth_date.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n\n return str(days.days / 365)", "def get_age(date):\n today = datetime.date.today()\n return today.year - date.year - ((today.month, today.day) < (date.month, date.day))", "def test_with_non_datetime(self):\n class Foo:\n def __init__(self, now):\n self.day = now.day\n self.month = now.month\n self.year = now.year\n\n self.assertEqual(ageid(Foo(self.now)), 'age1')", "def num_older_than(age: float) -> int:\r\n cur = con.cursor()\r\n count_older = cur.execute(\r\n \"\"\"SELECT COUNT(Patient_ID)\r\n FROM Patient\r\n WHERE (JULIANDAY('now') - JULIANDAY(Date_Of_Birth)) / ? > ?\"\"\",\r\n [DAYS_IN_YEAR, age],\r\n ).fetchall()\r\n\r\n return count_older[0][0]", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_age(self):\n return CURRENT_YEAR - self.year", "def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6", "def is_vintage(self):\n return self.get_age()>=AGE", "def get_age(self):\n age = CURRENT_YEAR - self.year\n return age", "def get_age(self):\n return int(CURRENT_YEAR[:4]) - self.year # String-slice only the year", "def age(self):\n today = date.today()\n birthday = datetime.strptime(self.birt['date'], \"%d %b %Y\")\n return birthday", "def _age_on_date(bday, exp_date):\n if exp_date < bday:\n raise ValueError(\"The experimentation date must be after the birth \"\n \"date\")\n if exp_date.month > bday.month:\n return exp_date.year - bday.year\n elif exp_date.month == bday.month:\n if exp_date.day >= bday.day:\n return exp_date.year - bday.year\n return exp_date.year - bday.year - 1", "def age(self, year=None):\n y, m = self.master['birthYear'], self.master['birthMonth']\n return (year if year else self.currentyear) - y - (m > 6)", "def get_age(self):\n born = self.birth_date\n if not born:\n return 0\n today = fields.Date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))", "def _set_age(cls, data):\n birth = data.get(\"birth\")\n if birth:\n today = datetime.now().date()\n data[\"age\"] = relativedelta(today, birth).years\n return data", "def calculate_current_age(dob):\n today = datetime.date.today()\n years = today.year - dob.year\n if today.month < dob.month or (today.month == dob.month and today.day < dob.day):\n years -= 1\n return years" ]
[ "0.72407234", "0.71853626", "0.69618046", "0.6308967", "0.5874855", "0.5764519", "0.57058084", "0.5670079", "0.5669819", "0.56191695", "0.55904025", "0.5588826", "0.5547945", "0.55410856", "0.54885393", "0.5463492", "0.54514664", "0.5433101", "0.5414855", "0.5414855", "0.53961027", "0.5382451", "0.53821313", "0.5346582", "0.53400517", "0.5325287", "0.5323781", "0.5323721", "0.53198695", "0.53150976" ]
0.7658383
0
Testing {% ageid %} with four days ago
def test_with_now_minus_4_days(self): self.assertEqual(ageid(self.now - timedelta(4)), 'age5')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_now_minus_3_days(self):\n self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_with_now(self):\n self.assertEqual(ageid(self.now), 'age1')", "def age(self):\n return datetime.now().year - self.birth_day.year", "def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def age(self):\n today = datetime.date.today()\n\n return today.year - int(self.birthday[0:4])", "def get_age(self):\n today = datetime.now()\n return today.year \\\n - self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))", "def age(birthdate):\n today=date.today()\n birthdate=date(birthdate[2],birthdate[1],birthdate[0])\n if birthdate>today:\n return \"Person has not been born yet!\"\n difference=today-birthdate\n return difference.days", "def age_diff(self, other):\n diff = self.age - other.age\n print(abs(diff), \"year difference\")", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping '.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def age(self):\n delta = now() - self.creation\n return delta.seconds", "def get_age(date):\n today = datetime.date.today()\n return today.year - date.year - ((today.month, today.day) < (date.month, date.day))", "def age(self):\n today = date.today()\n birthday = datetime.strptime(self.birt['date'], \"%d %b %Y\")\n return birthday", "def get_age(self, name=None):\n now = datetime.now()\n delta = relativedelta(now, self.date_of_birth)\n years_months_days = str(delta.years) + 'y ' + str(delta.months) 
+ \\\n 'm ' + str(delta.days) + 'd'\n return years_months_days", "def _perAgeChoiceSelector(self, params):\n\n entity = params['entity']\n\n birth_date = entity.birth_date\n today = params.get('today', date.today())\n\n days = today - birth_date\n days -= timedelta(days=calendar.leapdays(birth_date.year, today.year))\n if calendar.isleap(today.year) and today.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n if calendar.isleap(birth_date.year) and birth_date.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n\n return str(days.days / 365)", "def get_age(self):\n return int(CURRENT_YEAR[:4]) - self.year # String-slice only the year", "def get_age(self):\n return CURRENT_YEAR - self.year", "def get_age(self):\n return CURRENT_YEAR - self.year", "def calculate_current_age(dob):\n today = datetime.date.today()\n years = today.year - dob.year\n if today.month < dob.month or (today.month == dob.month and today.day < dob.day):\n years -= 1\n return years", "def age(self, year=None):\n y, m = self.master['birthYear'], self.master['birthMonth']\n return (year if year else self.currentyear) - y - (m > 6)", "def test_with_non_datetime(self):\n class Foo:\n def __init__(self, now):\n self.day = now.day\n self.month = now.month\n self.year = now.year\n\n self.assertEqual(ageid(Foo(self.now)), 'age1')", "def get_age(self):\n born = self.birth_date\n if not born:\n return 0\n today = fields.Date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))", "def _age_on_date(bday, exp_date):\n if exp_date < bday:\n raise ValueError(\"The experimentation date must be after the birth \"\n \"date\")\n if exp_date.month > bday.month:\n return exp_date.year - bday.year\n elif exp_date.month == bday.month:\n if exp_date.day >= bday.day:\n return exp_date.year - bday.year\n return exp_date.year - bday.year - 1", "def get_age(self):\n return Guitar.CURRENT_YEAR - self.year", "def get_age(self):\n age = CURRENT_YEAR - self.year\n return age", "def thirty_days_ago():\n return date.today() - timedelta(days=30)", "def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6", "def is_vintage(self):\n return self.get_age()>=AGE" ]
[ "0.74405986", "0.7243347", "0.7164045", "0.64622", "0.61869997", "0.5989337", "0.5977138", "0.59006375", "0.58485234", "0.58320135", "0.5765161", "0.57545257", "0.5669327", "0.56675255", "0.5645776", "0.56452954", "0.5626133", "0.56260896", "0.5614728", "0.5614728", "0.56090474", "0.55981123", "0.5596386", "0.5581624", "0.5560014", "0.55531913", "0.55298865", "0.5510046", "0.5505177", "0.5473678" ]
0.76875675
0
Testing {% ageid %} with nondatetime object
def test_with_non_datetime(self): class Foo: def __init__(self, now): self.day = now.day self.month = now.month self.year = now.year self.assertEqual(ageid(Foo(self.now)), 'age1')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_now(self):\n self.assertEqual(ageid(self.now), 'age1')", "def test_with_now_minus_1_day(self):\n self.assertEqual(ageid(self.now - timedelta(1)), 'age2')", "def test_with_now_minus_4_days(self):\n self.assertEqual(ageid(self.now - timedelta(4)), 'age5')", "def test_with_now_minus_2_days(self):\n self.assertEqual(ageid(self.now - timedelta(2)), 'age3')", "def test_with_now_minus_3_days(self):\n self.assertEqual(ageid(self.now - timedelta(3)), 'age4')", "def test_template_matcher(self):\n from srsly.ruamel_yaml.serializer import templated_id\n\n assert templated_id(u\"id001\")\n assert templated_id(u\"id999\")\n assert templated_id(u\"id1000\")\n assert templated_id(u\"id0001\")\n assert templated_id(u\"id0000\")\n assert not templated_id(u\"id02\")\n assert not templated_id(u\"id000\")\n assert not templated_id(u\"x000\")", "def test_person_loads_and_renders(self):\n person = Person.objects.get(user__username = 'admin')\n try:\n template = '{% load edit_link %}{% edit_link object %}'\n context = {'object': person}\n rendered = Template(template).render(Context(context))\n except:\n self.fail(\"raised exception while template rendering\")\n self.assertEquals(rendered, '<a href=\"/admin/hello/person/%s/\">(admin)</a>' % str(person.pk))", "def testClinicalPatientAge(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"age\")\n\n self.util.intPropertyTest(self, attr, \"age\")", "def test_person_loads_and_renders(self):\n response = self.client.get(reverse('main'))\n request_data = RequestData.objects.latest('pub_date')\n try:\n template = '{% load edit_link %}{% edit_link object %}'\n context = {'object': request_data}\n rendered = Template(template).render(Context(context))\n except:\n self.fail(\"raised exception while template rendering\")\n self.assertEquals(rendered, '<a href=\"/admin/hello/requestdata/%s/\">(admin)</a>' % str(request_data.pk))", "def test_tag_with_invalid_object(self):\n with self.assertRaises(TemplateSyntaxError):\n edit_link('anyobject')", "def is_vintage(self):\n return self.get_age()>=AGE", "def age(self):\n return datetime.now().year - self.birth_day.year", "def age(min=1, max=99):\r\n\r\n return '%.i' % ((_random.randint(min, max + 1) if min\r\n and max else _random.randint(1, 100)))", "def test_ave_age_range(step):\n diff = step[\"ave_birth\"] - step[\"birth\"]\n assert 0 < diff < 15E6", "def valid_age(line):\n dob = line.o_DOB\n if not _is_21(dob):\n rule = 'Allowed age'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True", "def getAge(self):\r\n return self.age", "def _set_age(cls, data):\n birth = data.get(\"birth\")\n if birth:\n today = datetime.now().date()\n data[\"age\"] = relativedelta(today, birth).years\n return data", "def _get_age(self):\n return self.__age", "def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")", "def verify_age(age):\n try:\n age_ = int(age)\n if age_ < 1:\n age_ = age_ * -1\n except ValueError:\n age_ = \"\"\n return str(age_)", "def test_uuid_uneditable(self):\n id_field = Movie._meta.get_field_by_name('id')[0]\n self.assertEqual(id_field.editable, False)", "def test_datetime_field():", "def test_date_of_birth(self):\n entries = 
{'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n }}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)", "def testTaggerLong(self):\n text = \"\"\"Churchill was born at the family's ancestral home,\nBlenheim Palace in Oxfordshire, on 30 November 1874,\nat which time the United Kingdom was the dominant world power.\nDirect descendants of the Dukes of Marlborough, his family were\namong the highest levels of the British aristocracy, and thus\nhe was born into the country's governing elite.\nHis paternal grandfather, John Spencer-Churchill,\n7th Duke of Marlborough, had been a Member of Parliament (MP)\nfor ten years, a member of the Conservative Party who served\nin the government of Prime Minister Benjamin Disraeli.\nHis own father, Lord Randolph Churchill, had been elected\nConservative MP for Woodstock in 1873.\nHis mother, Jennie Churchill (née Jerome), was from an\nAmerican family whose substantial wealth derived from\nfinance. The couple had met in August 1873, and were\nengaged three days later, marrying at the British Embassy\nin Paris in April 1874. The couple lived beyond their income\nand were frequently in debt; according to the biographer\nSebastian Haffner, the family were \"rich by normal\nstandards but poor by those of the rich\".\"\"\".replace(\"\\n\", \" \")\n # Note that the model does not handle the case of year alone (e.g. \"1873\")\n # since that can easily overgenerate. One would want to build a more\n # sophisticated classifier to handle such cases.\n result = dates.tag(text)\n self.assertIn(\n \"<date><day>30</day><month>11</month><year>1874</year></date>\", result)\n self.assertIn(\"<date><month>8</month><year>1873</year></date>\", result)\n self.assertIn(\"<date><month>4</month><year>1874</year></date>\", result)", "def get_age(self):\r\n return self.age", "def _perAgeChoiceSelector(self, params):\n\n entity = params['entity']\n\n birth_date = entity.birth_date\n today = params.get('today', date.today())\n\n days = today - birth_date\n days -= timedelta(days=calendar.leapdays(birth_date.year, today.year))\n if calendar.isleap(today.year) and today.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n if calendar.isleap(birth_date.year) and birth_date.timetuple()[7] > 31 + 29:\n days += timedelta(days=1)\n\n return str(days.days / 365)", "def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')", "def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))", "def age():\n return 1", "def age():\n return 1" ]
[ "0.63388425", "0.63034344", "0.62018883", "0.5981848", "0.5964965", "0.54276353", "0.53872085", "0.5364365", "0.5332901", "0.5280744", "0.5114568", "0.5074603", "0.5051021", "0.50412714", "0.5040437", "0.5022143", "0.50190246", "0.5007389", "0.49899918", "0.4977157", "0.4945081", "0.49376857", "0.49333814", "0.49217752", "0.4896585", "0.48911643", "0.48818022", "0.4876936", "0.48711002", "0.48711002" ]
0.6560905
0
Testing {% attr %} with value
def test_with_value(self): t = Template('{% load djblets_utils %}' '<span{% attr "class" %}\n' '{% if some_bool %}truthy{% endif %}\n' '{% endattr %}>') self.assertEqual( t.render(Context({ 'some_bool': True, })), '<span class="truthy">')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"&lt;hello&gt;\">')", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def test_html_tag(self):\n tag = 'fake_tag_given'\n attrs = ' id=\"fake_element\" fake_attr=\"pointless value\"'\n content = 'This is some test content'\n expected = '<%(tag)s%(attr)s>%(content)s</%(tag)s>' % {'tag': tag, 'attr': attrs, 'content': content}\n actual = self.form._html_tag(tag, content, attrs)\n self.assertEqual(expected, actual)", "def test_attributeWithValue(self):\n xp = XPathQuery(\"/foo[@attrib1='value1']\")\n self.assertEqual(xp.matches(self.e), 1)", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def test_traversal__path_resource_attribute(path, attribute_name, value):\n from pyramid.traversal import traverse\n root_resource = root_resource_factory()\n t = traverse(root_resource, path)\n context = t['context']\n assert getattr(context, attribute_name) == value", "def test_attributeWithValueAny(self):\n xp = XPathQuery(\"/foo/*[@attrib2='value2']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar2])", "def attr(self, name):\r\n return Assert(getattr(self.obj, name))", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def test_search_for():\n anchor = _gen_link(\n '{% search_for terms=\"has spaces\" reg=\"1234\" version=\"vvv\" %}')\n assert '1234' in anchor.get('href')\n assert 'vvv' in anchor.get('href')\n assert 'has%20spaces' in anchor.get('href')", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def test_register_existing_attr(self):\n pass", "def test_simple(self):\n self.assertEqual(render('{% fish_as as out %}-{{ out }}'), '-fish')\n self.assertEqual(render('{% another_fish_as as out %}-{{ out }}'), '-fish')", "def _testCurrentPageWebAttribute(self, attr):\n settings = self._currentPageSettings()\n return settings is not None and settings.testAttribute(attr)", "def 
test_string_default(self):\n tag = Tag()\n self.assertEqual(tag.value, 'default')", "def test_attr_dict(self):\n obj = awstats_reader.AttrDict([('this','that'), ('thus','those')])\n self.assertEqual(obj.thus, 'those')", "def test_get_page_template_tag(self):\n context = Context({})\n pl1 = \"\"\"{% load pages_tags %}{% get_page get-page-slug as toto %}{{ toto }}\"\"\"\n template = get_template_from_string(pl1)\n self.assertEqual(template.render(context), u'None')\n page = self.new_page({'slug':'get-page-slug'})\n self.assertEqual(template.render(context), u'get-page-slug')", "def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")", "def check_property(self, descriptor):\r\n self.assertEqual(descriptor.get_html(), descriptor.render('studio_view').content)", "def testFillMuray(self):\n t1 = \"{% load greeking_tags %}{% fillmurray 200 200 %}\"\n ctx, out = self.render(t1)\n self.assertEqual(out, '<img src=\"http://www.fillmurray.com/200/200/\"/>')\n self.assertRaises(\n TemplateSyntaxError,\n self.render,\n \"{% load greeking_tags %}{% fillmurray foobar %}\",\n )", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_optional(self):\n self.assertEqual(render('{% maybe_as %}-{{ out }}'), 'maybe-')\n self.assertEqual(render('{% maybe_as as out %}-{{ out }}'), '-maybe')", "def test_tag_is_in_the_template(self):\n\n template = Template(\n '{% load profile_admin_editing %}{% edit_link profile %}')\n\n context = Context({'profile': self.profile})\n\n self.assertEqual(self.super_link, template.render(context))", "def test_get_attribute_data(self):\n pass" ]
[ "0.6986304", "0.64239806", "0.62053716", "0.60444796", "0.6013152", "0.5872319", "0.57564497", "0.5672158", "0.5657833", "0.5617325", "0.5616147", "0.55541307", "0.55276436", "0.55276436", "0.5515096", "0.53856736", "0.5377886", "0.5365207", "0.53558373", "0.5329564", "0.53229076", "0.53202057", "0.53045017", "0.5289502", "0.528123", "0.5281136", "0.52558625", "0.5249097", "0.52271426", "0.5223214" ]
0.7152068
0
Testing {% attr %} with no value
def test_without_value(self): t = Template('{% load djblets_utils %}' '<span{% attr "class" %}\n' '{% if some_bool %}falsy{% endif %}\n' '{% endattr %}>') self.assertEqual( t.render(Context({ 'some_bool': False, })), '<span>')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_attribute_not_equal_no_quotes(self):\n\n # No quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\\\\35]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def test_notnull_attrs(self):\n obj = VeilRestPaginator(name='name', ordering='ordering', limit=None, offset=5)\n assert 'name' in obj.notnull_attrs\n assert 'limit' not in obj.notnull_attrs", "def test_optional(self):\n self.assertEqual(render('{% maybe_as %}-{{ out }}'), 'maybe-')\n self.assertEqual(render('{% maybe_as as out %}-{{ out }}'), '-maybe')", "def valid_att_in_field(arch, **kwargs):\n return not arch.xpath('//field[not(@name)]')", "def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')", "def test_textNotOperator(self):\n xp = XPathQuery(\"/foo[not(@nosuchattrib)]\")\n self.assertEqual(xp.matches(self.e), True)", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_attribute_not_equal_quotes(self):\n\n # Quotes\n self.assert_selector(\n self.MARKUP,\n \"body [id!='5']\",\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def test_no_attributes(self):\n self.run_mock_analyzer([])\n eq_(self.obj.analyze_attribute.called, False)", "def header_field_should_not_have_value(self, label):\n locator = lex_locators[\"record\"][\"header\"][\"field_value\"].format(label)\n self.selenium.page_should_not_contain_element(locator)", "def test_undefined_as_null_indicator(self):\n self.custom_null_indicator_template('undefined')", "def test_default_hidden_not_in_attributes(self):\n self.assertNotIn(\n ATTR_HIDDEN,\n self.hass.states.get(self.entity.entity_id).attributes)", "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"&lt;hello&gt;\">')", "def test_no_update_on_data_element(self):\n no_update = self.admitgen.data.attrib['noupdate']\n self.assertEqual(no_update, '1', 'Incorrect noupdate flag')", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_register_nonexisting_attr(self):\n pass", "def test_get_tag_fail(self):\n self.assertRaises(AttributeError, get_tag, None, \"h1\")\n self.assertRaises(\n AttributeError, get_tag, \"<h1>This is not a XML tag object</h1>\", \"h1\"\n )", "def not_met(predicate, request):\n return not predicate(request)", "def is_excluded(self, attr_name, request):\n return False", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def 
test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_example_field_is_none(self):\n self._example['weight'] = None\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertNotEmpty(output)", "def test_attribute_not_equal_double_quotes(self):\n\n # Double quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\"5\"]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def test_string_default(self):\n tag = Tag()\n self.assertEqual(tag.value, 'default')", "def test_link_tag_empty_href_attribute(m):\n m.get('http://mock.com/', text='<link rel=\"icon\" href=\"\">')\n\n with pytest.warns(None):\n icons = favicon.get('http://mock.com/')\n\n assert not icons", "def test_default_null_indicator(self):\n self.custom_null_indicator_template()", "def test_no_markup_type_field_if_set(self):\r\n self.assertTrue('markdown_field_markup_type' not in\r\n ArticleForm().fields.keys())" ]
[ "0.6310917", "0.61786014", "0.6155865", "0.61103326", "0.6076733", "0.60245657", "0.60144234", "0.5987151", "0.59353393", "0.5877451", "0.57956505", "0.5780371", "0.5739188", "0.57341295", "0.570383", "0.5699017", "0.56983703", "0.56973076", "0.56841075", "0.5683129", "0.56572425", "0.56572425", "0.56513727", "0.56378746", "0.5633864", "0.5578057", "0.5571772", "0.55397636", "0.5537497", "0.5501911" ]
0.7473954
0
Testing {% attr %} escapes value
def test_escapes_value(self): t = Template('{% load djblets_utils %}' '<span{% attr "data-foo" %}<hello>{% endattr %}>') self.assertEqual( t.render(Context()), '<span data-foo="&lt;hello&gt;">')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def _xml_escape_attr(attr, skip_single_quote=True):\r\n escaped = (attr\r\n .replace('&', '&amp;')\r\n .replace('\"', '&quot;')\r\n .replace('<', '&lt;')\r\n .replace('>', '&gt;'))\r\n if not skip_single_quote:\r\n escaped = escaped.replace(\"'\", \"&#39;\")\r\n return escaped", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, str) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)", "def _quoteattr(self, attr):\n attr = xml_safe(attr)\n if isinstance(attr, unicode) and not UNICODE_STRINGS:\n attr = attr.encode(self.encoding)\n return saxutils.quoteattr(attr)", "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')", "def as_html(self):\n return mark_safe(\" \".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def test_html_tag(self):\n tag = 'fake_tag_given'\n attrs = ' id=\"fake_element\" fake_attr=\"pointless value\"'\n content = 'This is some test content'\n expected = '<%(tag)s%(attr)s>%(content)s</%(tag)s>' % {'tag': tag, 'attr': attrs, 'content': content}\n actual = self.form._html_tag(tag, content, attrs)\n self.assertEqual(expected, actual)", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def html_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n return html_escape(val)", "def as_html(self):\r\n return mark_safe(' '.join(['%s=\"%s\"' % (k, escape(v if not callable(v) else v()))\r\n for k, v in six.iteritems(self)]))", "def test_symlit_escape():\n return \"\\\"=\\\"\"", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def value_as_html(self):\n property_name = \"_%s_as_html\" % self.attribute.type\n return getattr(self, property_name, self.value_as_text)", "def decorate_value(self, value):\n if self.type_name == 
'uml:LiteralString':\n value = \"'\" + value + \"'\"\n else:\n # if all else fails, assume string\n value = \"'\" + value + \"'\"\n\n return value", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def assert_in_html(member, container, **kwargs):\n member = markupsafe.escape(member)\n return assert_in(member, container, **kwargs)", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def htmlText(text, attr='', escapeText=False):\n return '<div%s>%s</div>\\n' % (sep(attr),escape(text) if escapeText else text)", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_single_quotes_returned(self):\n test_string = \"<p style=\\\"font-weight: bold;\\\">Test</p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"'\", cleaned)\n self.assertEqual(cleaned, \n \"<p style='font-weight: bold;'>Test</p>\"\n )", "def test_search_for():\n anchor = _gen_link(\n '{% search_for terms=\"has spaces\" reg=\"1234\" version=\"vvv\" %}')\n assert '1234' in anchor.get('href')\n assert 'vvv' in anchor.get('href')\n assert 'has%20spaces' in anchor.get('href')", "def test_attribute_not_equal_double_quotes(self):\n\n # Double quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\"5\"]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def catch_unquoted_attrs(self, text, attrlist):\n for tup in attrlist:\n (an, av) = tup\n rgx = \"%s\\s*=\\s*\" % (an) \\\n + \"['\" \\\n + '\"]%s[\"' % (re.escape(av)) \\\n + \"']\"\n q = re.search(rgx, self.unescape(text))\n if q == None:\n self.errmsg(\"unquoted attribute in '%s'\" % (text))", "def quote(s):\n return unescape(quoteattr(s))", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"" ]
[ "0.68725014", "0.66181767", "0.6570079", "0.6313018", "0.6299173", "0.6291482", "0.62913966", "0.62795967", "0.62727976", "0.625454", "0.6089015", "0.6058828", "0.6058828", "0.60290384", "0.59374166", "0.5875695", "0.5865545", "0.58360916", "0.5792689", "0.5773664", "0.5767308", "0.5752568", "0.5724921", "0.5718146", "0.5713945", "0.5695302", "0.5667904", "0.56635666", "0.5656252", "0.563246" ]
0.8348637
0
Testing {% attr %} condenses/strips extra whitespace by default
def test_condenses_whitespace(self): t = Template('{% load djblets_utils %}' '<span{% attr "data-foo" %}\n' 'some \n\n' 'value\n' '{% endattr %}>') self.assertEqual( t.render(Context()), '<span data-foo="some value">')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"&lt;hello&gt;\">')", "def test_attribute_not_equal_no_quotes(self):\n\n # No quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\\\\35]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def test_attribute_not_equal_quotes(self):\n\n # Quotes\n self.assert_selector(\n self.MARKUP,\n \"body [id!='5']\",\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_attribute_not_equal_double_quotes(self):\n\n # Double quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\"5\"]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def as_html(self):\n return mark_safe(\" \".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def test_collapsed_whitespace(self):\n self.assertSoupEquals(\"<p> </p>\", \"<p> </p>\")", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def test_relaxed_spacing_no_title(self):\n\n expected = r'''\n <details class=\"relaxed spacing no title\">\n <summary>Relaxed</summary>\n <p>content</p>\n </details>\n '''\n\n self.check_markdown(\n r'''\n ???relaxed spacing no title\n content\n ''',\n expected,\n True\n )", "def test_can_filter_attributes(self):\n text = 
'<b><a href=\"\" target=\"_blank\">Example</a></b>'\n filter = Bleach(\n tags=['a'],\n attributes=dict(a=['href', 'title'])\n )\n filtered = filter.filter(text)\n expected = '<a href=\"\">Example</a>'\n self.assertEquals(expected, filtered)", "def clean_whitespace(self, item):\n item['name'] = item['name'].strip()", "def _apply_filters(self, text, tag):\n\n # The order of the filters below is important\n # and should not be changed\n\n # intial_quotes needs to happen at this point so that\n # attribute values introduced later on do not get affected\n text = self.initial_quotes(text)\n text = self.smarty_pants(text)\n text = self.amp(text)\n text = self.caps(text)\n\n return text", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def test_remove_a_single_attribute(self):\n pass", "def _set_padding(self, attr, value):\n if not value:\n setattr(self, attr, \"\")\n else:\n value = str(value)\n if not value.isspace():\n raise ValueError(\"padding must be entirely whitespace\")\n setattr(self, attr, value)", "def strip_space():\n pass", "def _yamlSpeciallyHandledAttributes(self):\n return []", "def _prettify_attributes(self, config_entry, indentation_level):\n def get_string_representation(singular):\n return \"{0}: {1}{2}\".format(singular['@name'], str(singular['@value']), os.linesep)\n \n indent_level = indentation_level * 2\n string_representation = \"\"\n \n if 'attribute' in config_entry:\n if type(config_entry['attribute']) == list:\n for entry in config_entry['attribute']:\n string_representation = \"{0}{1}{2}\".format(string_representation, \" \"*indent_level, get_string_representation(entry))\n else:\n string_representation = \"{0}{1}\".format(\" \"*indent_level, get_string_representation(config_entry['attribute']))\n \n if len(string_representation) > 0 and string_representation[-1] == os.linesep:\n return string_representation[:-1]\n \n return string_representation", "def test_whitespaceStripFlagsAndParameters(self):\n # We test this by making sure aflag and it's help string are on the\n # same line.\n lines = [s for s in str(self.nice).splitlines() if s.find(\"aflag\")>=0]\n self.failUnless(len(lines) > 0)\n self.failUnless(lines[0].find(\"flagallicious\") >= 0)", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def test_spaces(self):\n self.assertValue({\n 'foo bar': 'something here',\n },\n \"foo_bar: something_here\\n\")", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])" ]
[ "0.75899607", "0.6015355", "0.60029346", "0.599833", "0.59812284", "0.590307", "0.58477765", "0.5798002", "0.5767309", "0.57589287", "0.5736964", "0.57327765", "0.566209", "0.555631", "0.55304635", "0.5522477", "0.55130064", "0.55026585", "0.55026585", "0.5475007", "0.54452765", "0.53795475", "0.5362959", "0.5345235", "0.53398156", "0.53361064", "0.53271633", "0.5325274", "0.53099597", "0.53051704" ]
0.7845027
0
Testing {% attr %} with "nocondense" option preserves whitespace
def test_with_nocondense_preserves_whitespace(self): t = Template('{% load djblets_utils %}' '<span{% attr "data-foo" nocondense %}\n' 'some \n\n' 'value\n' '{% endattr %}>') self.assertEqual( t.render(Context()), '<span data-foo="\nsome \n\nvalue\n">')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def test_relaxed_spacing_no_title(self):\n\n expected = r'''\n <details class=\"relaxed spacing no title\">\n <summary>Relaxed</summary>\n <p>content</p>\n </details>\n '''\n\n self.check_markdown(\n r'''\n ???relaxed spacing no title\n content\n ''',\n expected,\n True\n )", "def test_attribute_not_equal_no_quotes(self):\n\n # No quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\\\\35]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"&lt;hello&gt;\">')", "def test_attribute_not_equal_double_quotes(self):\n\n # Double quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\"5\"]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_attribute_not_equal_quotes(self):\n\n # Quotes\n self.assert_selector(\n self.MARKUP,\n \"body [id!='5']\",\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def no_underline_and_no_description(): # noqa: D416", "def test_parens_disabled():\n assert get_html(PARENS_TEXT) == \"<p>I am a ((parens)) example.</p>\"", "def test_avoids_bombing_on_none(self):\r\n test_value = None\r\n self.assertEqual(set(), suggest_tags(test_value))", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def test_collapsed_whitespace(self):\n self.assertSoupEquals(\"<p> </p>\", \"<p> </p>\")", "def test_braces_disabled():\n assert get_html(BRACES_TEXT) == \"<p>I am a {{braces}} example.</p>\"", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def as_html(self):\n return mark_safe(\" \".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in 
six.iteritems(self) if not v in EMPTY_VALUES]))", "def test_noop(self):\n html = '<div class=\"pink\">test</div>'\n css = ''\n self.assertEqual(html, inline_css(html, css, pretty_print=False))", "def __getTagText(self, tag):\n return ''.join(tag.findAll(text=True)).replace(unichr(160), ' ')", "def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')", "def test_set_attribute_override():\n elem = hr.Element(\n \"this is some text\",\n style=\"cheese\",\n answer=1,\n clas=\"spam\", # cspell:disable-line\n )\n elem.set_attributes(holy=\"grail\", answer=42, _clas=\"eggs\") # cspell:disable-line\n\n opening_tag = get_opening_line(elem)\n assert 'style=\"cheese\"' in opening_tag\n assert 'answer=\"42\"' in opening_tag\n assert 'class=\"eggs\"' in opening_tag\n assert 'holy=\"grail\"' in opening_tag", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def test_noformat_tags():\n format = Format(lambda s: s.lower())\n xml = '<%s>Hello, World!</%s>'\n format_tags = 'address div h1 p quote span'.split()\n noformat_tags = 'code kbd math pre script textarea'.split()\n for tag in format_tags + noformat_tags:\n x = xml % (tag, tag)\n s = serialize(x, format)\n if tag in format_tags:\n x = x.lower()\n assert s.endswith(x)", "def test_css_parsing_with_entities(data, styles, expected):\n css_sanitizer = CSSSanitizer(allowed_css_properties=styles)\n assert (\n clean(\n data, tags={\"p\"}, attributes={\"p\": [\"style\"]}, css_sanitizer=css_sanitizer\n )\n == expected\n )", "def test_custom_decorator_displaytex_no_maths(self):\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"tex\": \"a common string\", \"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n '<span class=\"ashley-latex-display\">a common string</span>',\n )", "def test_style_maintained(self):\n test_string = \"<p><font style='color: red'></p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"style='color: red;'\", cleaned)\n\n test_string = \"<p><table border=\\\"1\\\"></table></p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"border='1'\", cleaned)", "def test_bad_directives(style_checker):\n p = style_checker.run_style_checker('whatever', 'bad_directives.rst')\n style_checker.assertNotEqual(p.status, 0, p.image)\n style_checker.assertRunOutputEqual(p, \"\"\"\\\nbad_directives.rst:7: invalid directive syntax (':' should be '::')\n .. typo-directive-no-arg:\n ^\nbad_directives.rst:14: invalid directive syntax (':' should be '::')\n .. typo-directive-with-args: helo smtp\n ^\nbad_directives.rst:23: invalid directive syntax (':' should be '::')\n .. typo:With-Colors-not:ok:\n ^\nbad_directives.rst:25: invalid directive syntax (':' should be '::')\n .. 
typo:with-colors-NOT:ok: args1 two\n ^\n\"\"\")", "def test_can_filter_attributes(self):\n text = '<b><a href=\"\" target=\"_blank\">Example</a></b>'\n filter = Bleach(\n tags=['a'],\n attributes=dict(a=['href', 'title'])\n )\n filtered = filter.filter(text)\n expected = '<a href=\"\">Example</a>'\n self.assertEquals(expected, filtered)" ]
[ "0.76482457", "0.6110049", "0.6032022", "0.60097104", "0.59339833", "0.58609396", "0.5809474", "0.5775858", "0.57260346", "0.57034314", "0.5422834", "0.5383639", "0.5341832", "0.5255964", "0.51910955", "0.5153591", "0.5108174", "0.51074314", "0.50881976", "0.50743866", "0.5044767", "0.50419194", "0.503789", "0.49867377", "0.49853393", "0.49785313", "0.4973406", "0.49722537", "0.49649343", "0.49629748" ]
0.87957716
0
Testing {% definevar %}
def test_basic_usage(self): t = Template('{% load djblets_utils %}' '{% definevar "myvar" %}\n' 'test{{num}}\n' '{% enddefinevar %}' '{{myvar}}') self.assertEqual( t.render(Context({ 'num': 123, })), '\ntest123\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_global(self):\n t = Template(\n '{% load djblets_utils %}'\n '{% block main %}'\n '{% block inner %}'\n '{% definevar \"myvar\" global %}{{num}}{% enddefinevar %}'\n '{% endblock %}'\n '{% endblock %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[123]')", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_define_variable(self):\n self.assertEqual(['define', 'test', '\"test\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test\\\"\").asList())\n\n self.assertEqual(['define', 'test', \"f(w,x)\"],\n grammar._DEFINE_VAR.parseString(\"#define test f(w,x)\").asList())\n\n self.assertEqual(['define', 'test', '\"test1 test2\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test1 test2\\\"\").asList())", "def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def define(parser, token):\n\n bits = list(token.split_contents())\n\n if len(bits) != 2:\n raise TemplateSyntaxError(\"Expected format is: {% define variable %}\")\n\n name = bits[1]\n nodelist = parser.parse(('enddefine',))\n parser.delete_first_token()\n\n return DefineNode(name, nodelist)", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def var():\n return Parse.next_token().if_type(['VAR']).expect('var')", "def test_simple(self):\n self.assertEqual(render('{% fish_as as out %}-{{ out }}'), '-fish')\n self.assertEqual(render('{% another_fish_as as out %}-{{ out }}'), '-fish')", "def test_variables_get(self):\n pass", "def test_expand_var(self):\n self.assertEqual(\"test\",\n grammar._EXPAND_VAR.parseString(\"$test\").name)", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def test_render_snippet_id(self):\n template = SnippetTemplateFactory(code='<p>{{ snippet_id }}</p>')\n eq_(template.render({'myvar': 'foo'}), '<p>0</p>')", "def test_bad_placeholder_1(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n 
self.assertEqual(rendered, \"My name is James.\")", "def test_bad_placeholder_2(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My other name is James.')", "def register_var(tiling_var, val):\n globals()[tiling_var] = val", "def test_variable_simple(self):\r\n self.assertEquals(preview.latex_preview('x', variables=['x']), 'x')", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_bad_placeholder_2(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My other name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My other name is James.\")", "def test_variablepresentations_get(self):\n pass", "def check_template_variables(subject, vars):\n for var in vars:\n expect(subject).to(match(r'\\{\\{cookiecutter\\.' + var + '\\}\\}'))", "def angular_js_tests(request):\n return locals()", "def test_is_defined(project):\n project.add_mock_file(\"templates\", \"testtemplate.tmpl\",\n \"{% if other is defined %} {{name}} : {{ other.name }} \"\n \"{% if other.other is defined %} sub: {{ other.other.name }} {% endif %} \"\n \"{% else %} {{name}} is not defined {% endif %}\"\"\")\n\n project.compile(\"\"\"\nimport std\nimport unittest\n\nentity Test1:\nstring name\nend\n\nTest1 prev [0:1] -- [0:1] Test1 other\n\nimplementation tt for Test1:\ncontent=std::template(\"unittest/testtemplate.tmpl\")\nstd::print(content)\nend\n\nimplement Test1 using tt when not(self.prev is defined)\nimplement Test1 using std::none when self.prev is defined\n\nTest1(name=\"t1\",other=Test1(name=\"t11\"))\nTest1(name=\"t2\")\nTest1(name=\"t3\",other=Test1(name=\"t31\",other=Test1(name=\"t32\")))\n \"\"\")\n\n assert \"t3 : t31 sub: t32\" in project.get_stdout()\n assert \"t1 : t11\" in project.get_stdout()\n assert \"t2 is not defined\" in project.get_stdout()", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)", "def global_var(name: str) -> SetupVal:\n return GlobalVarVal(name)", "def test_parse_substitution_variable():\n assert parse_substitution_variable(\"${SOME_VAR}\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"$SOME_VAR\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"SOME_STRING\") is None\n assert parse_substitution_variable(\"SOME_$TRING\") is None\n assert parse_substitution_variable(\"${some_var}\") == \"some_var\"\n assert parse_substitution_variable(\"$some_var\") == \"some_var\"\n assert parse_substitution_variable(\"some_string\") is None\n assert parse_substitution_variable(\"some_$tring\") is None\n assert parse_substitution_variable(\"${SOME_$TRING}\") is None\n assert parse_substitution_variable(\"$SOME_$TRING\") == \"SOME_\"", "def test_variablepresentations_post(self):\n pass" ]
[ "0.7557205", "0.6654228", "0.66269934", "0.64311785", "0.6370524", "0.5851564", "0.58426505", "0.58310306", "0.58013564", "0.5758977", "0.5751494", "0.5745439", "0.5702883", "0.5684491", "0.5683458", "0.5608868", "0.5593703", "0.5558762", "0.55465716", "0.55460024", "0.5535757", "0.5522204", "0.5518274", "0.55014884", "0.5497682", "0.54797995", "0.547642", "0.54545254", "0.5445708", "0.54275364" ]
0.73995805
1
Testing {% definevar %} with global option
def test_with_global(self): t = Template( '{% load djblets_utils %}' '{% block main %}' '{% block inner %}' '{% definevar "myvar" global %}{{num}}{% enddefinevar %}' '{% endblock %}' '{% endblock %}' '[{{myvar}}]') self.assertEqual( t.render(Context({ 'num': 123, })), '[123]')
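The record above targets {% definevar %}'s global option, where a value assigned inside nested {% block %} tags remains visible after the blocks close. As background for this group of rows, a minimal sketch of how such a block-capturing tag can be built on Django's template machinery follows; DefineVarNode and its option handling are hypothetical, not the djblets source, though the parse/capture shape matches the define tag quoted among the negatives.

from django import template
from django.template import TemplateSyntaxError
from django.utils.safestring import mark_safe

register = template.Library()


class DefineVarNode(template.Node):
    """Hypothetical node: capture a rendered block into a context variable."""

    def __init__(self, name, nodelist, is_global=False):
        self.name = name
        self.nodelist = nodelist
        self.is_global = is_global

    def render(self, context):
        result = mark_safe(self.nodelist.render(context))

        if self.is_global:
            # Write into the root layer of the context stack so the value
            # survives leaving the enclosing {% block %} render scope.
            context.dicts[0][self.name] = result
        else:
            context[self.name] = result

        return ''


@register.tag
def definevar(parser, token):
    bits = token.split_contents()

    if len(bits) < 2:
        raise TemplateSyntaxError('%s requires a variable name' % bits[0])

    nodelist = parser.parse(('enddefinevar',))
    parser.delete_first_token()

    return DefineVarNode(name=bits[1].strip('"\''),
                         nodelist=nodelist,
                         is_global='global' in bits[2:])

Writing into context.dicts[0] is one common way to approximate template-wide globals; a production implementation might use render_context or a dedicated stack instead.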
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basic_usage(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" %}\\n'\n 'test{{num}}\\n'\n '{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '\\ntest123\\n')", "def global_var(name: str) -> SetupVal:\n return GlobalVarVal(name)", "def test_define_variable(self):\n self.assertEqual(['define', 'test', '\"test\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test\\\"\").asList())\n\n self.assertEqual(['define', 'test', \"f(w,x)\"],\n grammar._DEFINE_VAR.parseString(\"#define test f(w,x)\").asList())\n\n self.assertEqual(['define', 'test', '\"test1 test2\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test1 test2\\\"\").asList())", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def define(parser, token):\n\n bits = list(token.split_contents())\n\n if len(bits) != 2:\n raise TemplateSyntaxError(\"Expected format is: {% define variable %}\")\n\n name = bits[1]\n nodelist = parser.parse(('enddefine',))\n parser.delete_first_token()\n\n return DefineNode(name, nodelist)", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def set_jinja_before_request():\n resource_provider.set_jinja_globals()", "def register_var(tiling_var, val):\n globals()[tiling_var] = val", "def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')", "def setGlobal(name, value):", "def global_variables(request):\n data = {\n 'DEBUG': settings.DEBUG,\n }\n return data", "def set_v(self, varname: str, varval: Optional[str]) -> None:\n\n if varval:\n self.pandoc.append('-V')\n self.pandoc.append(f'{varname}={varval}')", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def test_setting_default(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"base_site.html\")", "def test_bad_placeholder_2(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% 
blocktranslate %}My other name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My other name is James.\")", "def test_bad_placeholder_2(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My other name is James.')", "def test_is_defined(project):\n project.add_mock_file(\"templates\", \"testtemplate.tmpl\",\n \"{% if other is defined %} {{name}} : {{ other.name }} \"\n \"{% if other.other is defined %} sub: {{ other.other.name }} {% endif %} \"\n \"{% else %} {{name}} is not defined {% endif %}\"\"\")\n\n project.compile(\"\"\"\nimport std\nimport unittest\n\nentity Test1:\nstring name\nend\n\nTest1 prev [0:1] -- [0:1] Test1 other\n\nimplementation tt for Test1:\ncontent=std::template(\"unittest/testtemplate.tmpl\")\nstd::print(content)\nend\n\nimplement Test1 using tt when not(self.prev is defined)\nimplement Test1 using std::none when self.prev is defined\n\nTest1(name=\"t1\",other=Test1(name=\"t11\"))\nTest1(name=\"t2\")\nTest1(name=\"t3\",other=Test1(name=\"t31\",other=Test1(name=\"t32\")))\n \"\"\")\n\n assert \"t3 : t31 sub: t32\" in project.get_stdout()\n assert \"t1 : t11\" in project.get_stdout()\n assert \"t2 is not defined\" in project.get_stdout()", "def test_bad_placeholder_1(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My name is James.\")", "def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)", "def var():\n return Parse.next_token().if_type(['VAR']).expect('var')", "def angular_js_tests(request):\n return locals()", "def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')", "def test_setting_override(self):\n request = mock.Mock()\n request.resolver_match.kwargs.get.return_value = None\n request.path = '/'\n context = context_processors.decide_base_template(request)\n self.assertEqual(context['base_template'], \"test.html\")", "def conftest(opts):\n template = get_template(\"conftest\")\n return template.safe_substitute(opts)", "def test_default(self):\r\n self.assertEqual(self.option.default, 'hello')", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def defineVariable(self, file, var, axesArgString):\n if self.isVariableDefined(var.id):\n self.warningWidget.showWarning(var.id, file, var, axesArgString)\n else:\n self.addVariable(var.id, file, var, axesArgString)" ]
[ "0.6690785", "0.6217259", "0.58671683", "0.57850444", "0.57299507", "0.56862706", "0.56507456", "0.55629444", "0.5521804", "0.5510736", "0.55011946", "0.5426911", "0.5374006", "0.5332082", "0.52935004", "0.5253905", "0.5244905", "0.5229635", "0.52291816", "0.5228609", "0.52267665", "0.5198305", "0.5195288", "0.51775765", "0.51517475", "0.51376575", "0.5132182", "0.51155376", "0.5101425", "0.5098433" ]
0.76271826
0
Testing {% definevar %} with strip option
def test_with_strip(self): t = Template('{% load djblets_utils %}' '{% definevar "myvar" strip %}\n' '<span>\n' ' <strong>\n' ' test{{num}}\n' ' </strong>\n' '</span>\n' '{% enddefinevar %}' '[{{myvar}}]') self.assertEqual( t.render(Context({ 'num': 123, })), '[<span>\n <strong>\n test123\n </strong>\n</span>]')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_basic_usage(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" %}\\n'\n 'test{{num}}\\n'\n '{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '\\ntest123\\n')", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def cleanUpDefine(define):\n # Remove extra quotes and trailing spacess\n cleanDefine = define.strip()\n cleanDefine = REMOVE_QUOTE_REGEX.sub(\"\", cleanDefine)\n # Normalize paths in defines if any present.\n cleanDefine = ntpath.normpath(cleanDefine)\n return cleanDefine", "def test_unquoted(self):\n e = yaenv.core.EnvVar('key = value\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def test_with_no_commas(self):\r\n self.render_template('\"file1\" \"file2\" \"file3\"')", "def test_with_global(self):\n t = Template(\n '{% load djblets_utils %}'\n '{% block main %}'\n '{% block inner %}'\n '{% definevar \"myvar\" global %}{{num}}{% enddefinevar %}'\n '{% endblock %}'\n '{% endblock %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[123]')", "def test_bad_placeholder_2(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My other name is James.')", "def test_allow_unknown():\n template = 'name=\"{name}\" value=\"{value}\"'\n fmt = FormatTemplate(remove_unused=False)\n result = fmt(template)\n assert result == template", "def test_parse_substitution_variable():\n assert parse_substitution_variable(\"${SOME_VAR}\") == \"SOME_VAR\"\n assert 
parse_substitution_variable(\"$SOME_VAR\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"SOME_STRING\") is None\n assert parse_substitution_variable(\"SOME_$TRING\") is None\n assert parse_substitution_variable(\"${some_var}\") == \"some_var\"\n assert parse_substitution_variable(\"$some_var\") == \"some_var\"\n assert parse_substitution_variable(\"some_string\") is None\n assert parse_substitution_variable(\"some_$tring\") is None\n assert parse_substitution_variable(\"${SOME_$TRING}\") is None\n assert parse_substitution_variable(\"$SOME_$TRING\") == \"SOME_\"", "def test_bad_placeholder_1(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My name is James.\")", "def test_bad_placeholder_2(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My other name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My other name is James.\")", "def strip_directive (name):\n return RE_AUTOSTRIP.sub ('', name)", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def test_simple(self):\n self.assertEqual(render('{% fish_as as out %}-{{ out }}'), '-fish')\n self.assertEqual(render('{% another_fish_as as out %}-{{ out }}'), '-fish')", "def test_none_content_object_production(self):\n tmpl = Template(\"\"\"\n output:\n {% load editregion %}\n {% editregion \"test\" None %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('output:', tmpl.render(Context()).strip())", "def remove_sensitive_var_name(cls, var_name: str) -> str:\n if var_name.endswith('_path'):\n var_name = var_name.replace('_path', '')\n if var_name.endswith('_file'):\n var_name = var_name.replace('_file', '')\n return var_name", "def test_blank_content_object_production(self):\n tmpl = Template(\"\"\"\n {% load editregion %}\n {% editregion \"test\" obj %}fallback{% endeditregion %}\n \"\"\")\n self.assertEqual('fallback', tmpl.render(Context()).strip())", "def _var_quote_sub(self, text, VARS):\n ## No need to live on class. Can be moved to tools. - Add assert test.\n qvars = map(lambda x: \"\\{ \" + x + \" \\}\", VARS)\n return text % tuple(qvars)", "def strip_value(value, arg):\n return value.replace(arg, '')", "def test_sanitized_trim(self):\n value = \" sample \"\n response = clean.trim(value)\n assert response == \"sample\"\n assert type(response) == str", "def strip_variables(*args):\n return [arg.strip(\" '\\\"\") if arg is not None else arg for arg in args]", "def test_if_and_filter_statement():\n r = convert_code(\n \"{if foo and awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if foo and awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def _sanitize(opt, value):\n return value if not opt.secret else '*' * 4" ]
[ "0.67545617", "0.6157952", "0.5923703", "0.5849991", "0.5803739", "0.57914233", "0.5790824", "0.5713735", "0.55374765", "0.54750603", "0.54729444", "0.5461906", "0.5449159", "0.54380345", "0.5423991", "0.54140425", "0.5406714", "0.5388676", "0.53626126", "0.53512734", "0.5298583", "0.5287102", "0.5266631", "0.5258129", "0.5199696", "0.51801795", "0.5151347", "0.5142997", "0.51221645", "0.5094372" ]
0.8216071
0
Testing {% definevar %} with spaceless option
def test_with_spaceless(self): t = Template('{% load djblets_utils %}' '{% definevar "myvar" spaceless %}\n' '<span>\n' ' <strong>\n' ' test{{num}}\n' ' </strong>\n' '</span>\n' '{% enddefinevar %}' '[{{myvar}}]') self.assertEqual( t.render(Context({ 'num': 123, })), '[<span><strong>\n test123\n </strong></span>]')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_basic_usage(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" %}\\n'\n 'test{{num}}\\n'\n '{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '\\ntest123\\n')", "def test_with_global(self):\n t = Template(\n '{% load djblets_utils %}'\n '{% block main %}'\n '{% block inner %}'\n '{% definevar \"myvar\" global %}{{num}}{% enddefinevar %}'\n '{% endblock %}'\n '{% endblock %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[123]')", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')", "def test_bad_placeholder_2(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My other name is James.')", "def test_bad_placeholder_1(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My name is James.\")", "def test_simple(self):\n self.assertEqual(render('{% fish_as as out %}-{{ out }}'), '-fish')\n self.assertEqual(render('{% another_fish_as as out %}-{{ out }}'), '-fish')", "def test_bad_placeholder_2(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My other name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": 
\"James\"}))\n self.assertEqual(rendered, \"My other name is James.\")", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def test_optional(self):\n self.assertEqual(render('{% maybe_as %}-{{ out }}'), 'maybe-')\n self.assertEqual(render('{% maybe_as as out %}-{{ out }}'), '-maybe')", "def test_define_variable(self):\n self.assertEqual(['define', 'test', '\"test\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test\\\"\").asList())\n\n self.assertEqual(['define', 'test', \"f(w,x)\"],\n grammar._DEFINE_VAR.parseString(\"#define test f(w,x)\").asList())\n\n self.assertEqual(['define', 'test', '\"test1 test2\"'],\n grammar._DEFINE_VAR.parseString(\"#define test \\\"test1 test2\\\"\").asList())", "def test_expand_var(self):\n self.assertEqual(\"test\",\n grammar._EXPAND_VAR.parseString(\"$test\").name)", "def var():\n return Parse.next_token().if_type(['VAR']).expect('var')", "def test_variable_simple(self):\r\n self.assertEquals(preview.latex_preview('x', variables=['x']), 'x')", "def define(parser, token):\n\n bits = list(token.split_contents())\n\n if len(bits) != 2:\n raise TemplateSyntaxError(\"Expected format is: {% define variable %}\")\n\n name = bits[1]\n nodelist = parser.parse(('enddefine',))\n parser.delete_first_token()\n\n return DefineNode(name, nodelist)", "def test_parse_substitution_variable():\n assert parse_substitution_variable(\"${SOME_VAR}\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"$SOME_VAR\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"SOME_STRING\") is None\n assert parse_substitution_variable(\"SOME_$TRING\") is None\n assert parse_substitution_variable(\"${some_var}\") == \"some_var\"\n assert parse_substitution_variable(\"$some_var\") == \"some_var\"\n assert parse_substitution_variable(\"some_string\") is None\n assert parse_substitution_variable(\"some_$tring\") is None\n assert parse_substitution_variable(\"${SOME_$TRING}\") is None\n assert parse_substitution_variable(\"$SOME_$TRING\") == \"SOME_\"", "def test_allow_unknown():\n template = 'name=\"{name}\" value=\"{value}\"'\n fmt = FormatTemplate(remove_unused=False)\n result = fmt(template)\n assert result == template", "def set_v(self, varname: str, varval: Optional[str]) -> None:\n\n if varval:\n self.pandoc.append('-V')\n self.pandoc.append(f'{varname}={varval}')", "def test_mapping(self):\n vark = VarKeyword()\n assert vark.name in vark\n assert '{}_'.format(vark.name) not in vark\n assert len(vark) == 1\n assert list(vark) == [vark.name]", "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"&lt;hello&gt;\">')", "def test_stop_words():\n stop_manage = \"hello\"\n\n assert stop_manage is not None\n assert stop_manage", "def testTitleTemplateSubstitute(self):\n\n\t\tfield_values = {'abc': 'ABC', 'a.1': 'VALUE'}\n\n\t\ttests = {\n\t\t\t'${abc} $$ ${} ${{{} ${abc}': 'ABC $ ${} ${{{} ABC',\n\t\t\t'$abc ${a.1} $$$$': '$abc VALUE $$'\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.substitute(field_values), tests[test])", "def test_add_var_desc():\n v = dd.vars['WGT']\n \n assert add_var_desc('Housing ', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing'\n\n \"\"\" Test add second line \"\"\"\n assert add_var_desc(' Unit Weight', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing Unit Weight'\n\n \"\"\" Test 
prevention against duplication \"\"\"\n assert add_var_desc('Housing Unit Weight', dd, 'WGT') == 'WGT'\n assert add_var_desc('HousingUnit Weight', dd, 'WGT') == 'WGT'\n\n assert add_var_desc('Person', dd, 'PWGT') == None", "def test_instance_vars_present(question, question_text):\n instance = question[\"instance\"]\n for v in instance.get(\"variables\", {}):\n v_pattern = \"${\" + v + \"}\"\n assert v_pattern in question_text" ]
[ "0.7116686", "0.67886454", "0.67262304", "0.64860123", "0.6057726", "0.6020129", "0.59924453", "0.59869397", "0.5836771", "0.58309156", "0.57882994", "0.57387507", "0.5709727", "0.57092327", "0.56692076", "0.5519866", "0.5509027", "0.5428919", "0.53868854", "0.5332345", "0.52887166", "0.5288228", "0.52823824", "0.5240731", "0.5202727", "0.5200928", "0.51663923", "0.5143486", "0.51320976", "0.5108626" ]
0.7949295
0
Testing {% definevar %} with unsafe option
def test_with_unsafe(self): t = Template('{% load djblets_utils %}' '{% definevar "myvar" unsafe %}<hello>{% enddefinevar %}' '{{myvar}}') self.assertEqual(t.render(Context()), '&lt;hello&gt;')
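The last three records in this group exercise the strip, spaceless, and unsafe options. Judging only from the expected outputs in those tests, each option is a different post-processing step applied to the captured block. The helper below is a hypothetical reconstruction of that step, not the shipped code; the option names come from the tests, everything else is inferred.

from django.utils.html import strip_spaces_between_tags
from django.utils.safestring import mark_safe


def postprocess(content, options):
    """Hypothetical option handling, inferred from the expected outputs."""
    if 'strip' in options:
        # Trim leading/trailing whitespace; inner whitespace is kept,
        # matching '[<span>\n <strong>...' in the strip test.
        content = content.strip()

    if 'spaceless' in options:
        # Also collapse whitespace *between* tags, as Django's
        # {% spaceless %} does, matching '[<span><strong>...' above.
        content = strip_spaces_between_tags(content.strip())

    if 'unsafe' in options:
        # Leave the value unmarked so a later {{ myvar }} gets
        # autoescaped, matching the '&lt;hello&gt;' expectation.
        return content

    return mark_safe(content)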
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_with_global(self):\n t = Template(\n '{% load djblets_utils %}'\n '{% block main %}'\n '{% block inner %}'\n '{% definevar \"myvar\" global %}{{num}}{% enddefinevar %}'\n '{% endblock %}'\n '{% endblock %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[123]')", "def test_basic_usage(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" %}\\n'\n 'test{{num}}\\n'\n '{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '\\ntest123\\n')", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def test_bad_placeholder_1(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My name is James.\")", "def test_bad_placeholder_2(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My other name is James.')", "def test_bad_placeholder_2(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My other name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My other name is James.\")", "def test_bug_652575():\n assert _do_test_raw(\"var x = 'capability.policy.';\").failed()", "def allow_unresolved_variable_tokens(self):\n return self._allow_unresolved_variable_tokens", "def test_special_strings(self, vector):\n vector.get_value('exec_option')['enable_expr_rewrites'] = \\\n vector.get_value('enable_expr_rewrites')\n self.run_test_case('QueryTest/special-strings', vector)", "def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')", "def test_allow_unknown():\n template = 'name=\"{name}\" value=\"{value}\"'\n fmt = FormatTemplate(remove_unused=False)\n result = fmt(template)\n assert result == template", "def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)", "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span 
data-foo=\"&lt;hello&gt;\">')", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def _sanitize(opt, value):\n return value if not opt.secret else '*' * 4", "def test_render_to_html_valid_variables(self):\n marker = \"TEMPLATESTRINGINVALID\"\n settings.TEMPLATE_STRING_IF_INVALID = marker\n\n issue = Issue.objects.create(newsletter=self.newsletter,\n pub_date=datetime.date.today())\n html = issue.render_to_html(\n html_template_name='bulletin/api/test/html_template.html')\n\n self.assertEqual(html.find(marker), -1)", "def js_var(var, raw):\n lestr = r\"\\b{0}\\s*=\\s*\\\"([^\\\"]+)\".format(var)\n match = search(lestr, raw)\n return None if match is None else match.group(1)", "def sanitize_python_var_name(var_name, replacement_text=\"\"):\n\n var_name = var_name.strip()\n sanitize_var_name = __RE_INVALID_VAR_NAME.sub(\n replacement_text, var_name)\n\n # delete invalid char(s) in the beginning of the variable name\n is_delete_head = any([\n dataproperty.is_empty_string(replacement_text),\n __RE_INVALID_VAR_NAME_HEAD.search(replacement_text) is not None,\n ])\n\n if is_delete_head:\n sanitize_var_name = __RE_INVALID_VAR_NAME_HEAD.sub(\n \"\", sanitize_var_name)\n else:\n match = __RE_INVALID_VAR_NAME_HEAD.search(sanitize_var_name)\n if match is not None:\n sanitize_var_name = (\n match.end() * replacement_text +\n __RE_INVALID_VAR_NAME_HEAD.sub(\"\", sanitize_var_name)\n )\n\n validate_python_var_name(sanitize_var_name)\n\n return sanitize_var_name", "def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))", "def test_make_fname_js_safe_no_change():\n\n safe = \"abc\"\n expected = \"abc\"\n\n assert expected == u.make_fname_js_safe(safe)", "def insert_evaluate_variables(text, var_dict):\n if isinstance(text, list):\n text.insert(0, '{% load quest_render_tags %}')\n rndr_string = '\\n'.join(text)\n else:\n rndr_string = r'{% load quest_render_tags %} ' + text\n\n var_dict_rendered = {}\n for key, values in var_dict.iteritems():\n var_dict_rendered[key] = values[1]\n\n tmplte = Template(rndr_string)\n cntxt = Context(var_dict_rendered)\n return tmplte.render(cntxt)", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_parse_substitution_variable():\n assert parse_substitution_variable(\"${SOME_VAR}\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"$SOME_VAR\") == \"SOME_VAR\"\n assert parse_substitution_variable(\"SOME_STRING\") is None\n assert parse_substitution_variable(\"SOME_$TRING\") is None\n assert parse_substitution_variable(\"${some_var}\") == \"some_var\"\n assert parse_substitution_variable(\"$some_var\") == \"some_var\"\n assert parse_substitution_variable(\"some_string\") is None\n assert parse_substitution_variable(\"some_$tring\") is None\n assert parse_substitution_variable(\"${SOME_$TRING}\") is None\n assert parse_substitution_variable(\"$SOME_$TRING\") == \"SOME_\"", "def var_ref_from_unsafe_huh(varref):\n return varref.is_unsafe()", "def test_unquoted(self):\n 
e = yaenv.core.EnvVar('key = value\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate", "def test_flonum_unsafe(doctest):", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"" ]
[ "0.64857894", "0.64418113", "0.61597216", "0.61581916", "0.59256214", "0.58586115", "0.58135176", "0.57546264", "0.5553935", "0.5450713", "0.54189855", "0.54012203", "0.5380707", "0.5361904", "0.5353325", "0.5328995", "0.532316", "0.52441657", "0.51985097", "0.5165681", "0.5133034", "0.5131574", "0.5129735", "0.51153195", "0.5109016", "0.5098025", "0.5085764", "0.5067699", "0.50543636", "0.5002724" ]
0.81571186
0
Testing {{...|escapespaces}} with single space
def test_with_single_space(self): self.assertEqual(escapespaces('Hi there'), 'Hi there')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_with_multiple_spaces(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi&nbsp; there')", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_spaces(self):\n self.assertValue({\n 'foo bar': 'something here',\n },\n \"foo_bar: something_here\\n\")", "def test_space_replacements(self):\n testString = sanitize(' a b c d ', '_')\n self.assertEqual(testString, '__a_b_c_d___')", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_escapes_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}<hello>{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"&lt;hello&gt;\">')", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_escape_argument_with_space():\n encoded = win_functions.escape_argument(\"with space\")\n assert encoded == '^\"with space^\"'", "def test_with_newline(self):\n self.assertEqual(escapespaces('Hi there\\n'),\n 'Hi&nbsp; there<br />')", "def test_as_default(self):\n self.assertEqual(render('{% default_as %}...{{ snake }}'), '...hisss')", "def test_simple(self):\n self.assertEqual(render('{% fish_as as out %}-{{ out }}'), '-fish')\n self.assertEqual(render('{% another_fish_as as out %}-{{ out }}'), '-fish')", "def test_special_characters(self):\n testString = sanitize('[-;]\\`{\\}')\n self.assertEqual(testString, '_________')", "def output_space(value):\n tpl_args = value.split(':')\n template = tpl_args[0]\n spec = {}\n for modifier in tpl_args[1:]:\n mitems = modifier.split('-', 1)\n spec[mitems[0]] = len(mitems) == 1 or mitems[1]\n\n return template, spec", "def test_braces_disabled():\n assert get_html(BRACES_TEXT) == \"<p>I am a {{braces}} example.</p>\"", "def test_if_and_filter_statement():\n r = convert_code(\n \"{if foo and awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if foo and awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def testTitleTemplateSubstitute(self):\n\n\t\tfield_values = {'abc': 'ABC', 'a.1': 'VALUE'}\n\n\t\ttests = {\n\t\t\t'${abc} $$ ${} ${{{} ${abc}': 'ABC $ ${} ${{{} ABC',\n\t\t\t'$abc ${a.1} $$$$': '$abc VALUE $$'\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = 
TitleTemplate(test)\n\t\t\tself.assertEqual(t.substitute(field_values), tests[test])", "def format_template(template, *args):\n return textwrap.dedent(template % args).strip()", "def test_symlit_escape():\n return \"\\\"=\\\"\"", "def test_parse_quotes_not_three_vertical_bars(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"did not find 3 '|' characters\")):\n api.parse_quote(\" This is a quote||\", simple_format=False)", "def test_with_unsafe(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" unsafe %}<hello>{% enddefinevar %}'\n '{{myvar}}')\n\n self.assertEqual(t.render(Context()), '&lt;hello&gt;')", "def test_bad_placeholder_2(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My other name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My other name is James.')", "def test_bad_placeholder_1(self):\n with translation.override('fr'):\n t = Template('{% load i18n %}{% blocktrans %}My name is {{ person }}.{% endblocktrans %}')\n rendered = t.render(Context({'person': 'James'}))\n self.assertEqual(rendered, 'My name is James.')", "def test_code(self):\n self.assertEquals(\"\\n\\tline1\\n\\tline2\",\n trans(\"{{{\\nline1\\nline2\\n}}}\"))", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def test_allow_unknown():\n template = 'name=\"{name}\" value=\"{value}\"'\n fmt = FormatTemplate(remove_unused=False)\n result = fmt(template)\n assert result == template", "def test_preserved_whitespace_in_pre_and_textarea(self):\n self.assertSoupEquals(\"<pre> </pre>\")\n self.assertSoupEquals(\"<textarea> woo </textarea>\")", "def embeded_triple_quotes():\n pass", "def test_bad_placeholder_2(self):\n with translation.override(\"fr\"):\n t = Template(\n \"{% load i18n %}{% blocktranslate %}My other name is {{ person }}.\"\n \"{% endblocktranslate %}\"\n )\n rendered = t.render(Context({\"person\": \"James\"}))\n self.assertEqual(rendered, \"My other name is James.\")" ]
[ "0.68032426", "0.67486656", "0.6638375", "0.62644416", "0.60793024", "0.6068157", "0.59095526", "0.5902652", "0.5900635", "0.5815562", "0.5795822", "0.577586", "0.5750066", "0.57357484", "0.5699933", "0.5664234", "0.56113553", "0.5590534", "0.5538899", "0.55047613", "0.5451443", "0.5448996", "0.5442951", "0.5419907", "0.5405341", "0.53978735", "0.5390227", "0.5338342", "0.53295034", "0.5301598" ]
0.70163834
0
Testing {{...|escapespaces}} with multiple consecutive spaces
def test_with_multiple_spaces(self): self.assertEqual(escapespaces('Hi there'), 'Hi&nbsp; there')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_single_space(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi there')", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_spaces(self):\n self.assertValue({\n 'foo bar': 'something here',\n },\n \"foo_bar: something_here\\n\")", "def test_space_replacements(self):\n testString = sanitize(' a b c d ', '_')\n self.assertEqual(testString, '__a_b_c_d___')", "def output_space(value):\n tpl_args = value.split(':')\n template = tpl_args[0]\n spec = {}\n for modifier in tpl_args[1:]:\n mitems = modifier.split('-', 1)\n spec[mitems[0]] = len(mitems) == 1 or mitems[1]\n\n return template, spec", "def test_with_newline(self):\n self.assertEqual(escapespaces('Hi there\\n'),\n 'Hi&nbsp; there<br />')", "def test_parse_quotes_not_three_vertical_bars(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"did not find 3 '|' characters\")):\n api.parse_quote(\" This is a quote||\", simple_format=False)", "def test_remove_multiple_spaces():\n questions_parser = QuestionsParser()\n assert questions_parser.remove_multiple_spaces('Sentence with multiple spaces') == 'Sentence with multiple spaces'", "def test_preserved_whitespace_in_pre_and_textarea(self):\n self.assertSoupEquals(\"<pre> </pre>\")\n self.assertSoupEquals(\"<textarea> woo </textarea>\")", "def test_double_spaces():\n assert my_splitter(\"string with !@#$double spaces\", \" \") == \\\n [\"string\", \"\", \"with\", \"\", \"!@#$double\", \"\", \"spaces\"]", "def test_with_nocondense_preserves_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" nocondense %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"\\nsome \\n\\nvalue\\n\">')", "def test_indented_with_spaces(question_text, question_path):\n if \"\\t\" in question_text:\n raise ValueError(\n \"Found tab indentation in question {}. 
Please run \\\"sed -i '' 's/\\\\\\\\t/ /g' {}\\\" to switch to spaces.\".format(\n question_path, path.join(REPO, question_path)\n )\n )", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_collapsed_whitespace(self):\n self.assertSoupEquals(\"<p> </p>\", \"<p> </p>\")", "def html_space(text):\r\n return cgi.escape(text).replace('\\t', ' ').replace(' ', '&nbsp;')", "def test_escape_argument_with_space():\n encoded = win_functions.escape_argument(\"with space\")\n assert encoded == '^\"with space^\"'", "def test_space_at_the_end():\n line = \"n\" * 79 + \" \"\n print \"--%s--\" % wrap_line(line)\n assert wrap_line(line) == \"n\" * 79", "def test_special_characters(self):\n testString = sanitize('[-;]\\`{\\}')\n self.assertEqual(testString, '_________')", "def white_spaces(value):\n if re.search(r'[\\s]', value):\n raise ValidationError(_('El login no puede contener espacios en blanco'))", "def test_format_simple_spaces(self) -> None:\n assert self._file_read(\"expect.css\").replace(\n \"\\t\", \" \"\n ) == CSSFormatter._format_str(self._file_read(\"input.css\"), indent_tabs=False)", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def test_before_space():\n \n \n assert(1 == before_space(\"1 2 3\"))\n assert(\"NO SPACE\" == before_space(\"1\"))\n assert(\"Error\" == before_space(None))", "def test_braces_disabled():\n assert get_html(BRACES_TEXT) == \"<p>I am a {{braces}} example.</p>\"", "def make_spaces_for_html(indent_num: int) -> str:\r\n from apysc.validation import number_validation\r\n number_validation.validate_integer(integer=indent_num)\r\n number_validation.validate_num_is_gte_zero(num=indent_num)\r\n spaces: str = ' ' * (indent_num * 2)\r\n return spaces", "def embeded_triple_quotes():\n pass", "def test_relaxed_spacing_no_title(self):\n\n expected = r'''\n <details class=\"relaxed spacing no title\">\n <summary>Relaxed</summary>\n <p>content</p>\n </details>\n '''\n\n self.check_markdown(\n r'''\n ???relaxed spacing no title\n content\n ''',\n expected,\n True\n )", "def test_quoted(self):\n exp = ['(', '(', 'h ', ',', 'p', ')', 'h p', ',', \"g()[],':_\", ')', 'hpg', ';']\n content = \"((h_ ,'p')h p,'g()[],'':_')hpg;\"\n self._do_test(content, exp)\n content = \"(('h ',p)h p,'g()[],'':_')hpg;\"\n self._do_test(content, exp)", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def block_indent(text, spaces=4):\n return '\\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])" ]
[ "0.67469984", "0.6433338", "0.63909113", "0.61706376", "0.616987", "0.603858", "0.57823545", "0.5732287", "0.5717638", "0.55804926", "0.55595803", "0.5544655", "0.5468769", "0.5451948", "0.5430117", "0.5414154", "0.538847", "0.53717864", "0.5336291", "0.52358156", "0.5225007", "0.5220537", "0.520743", "0.5204014", "0.5185115", "0.5179588", "0.51773816", "0.5156698", "0.51546335", "0.51430404" ]
0.6839661
0
Testing {{...|escapespaces}} with newline
def test_with_newline(self): self.assertEqual(escapespaces('Hi there\n'), 'Hi&nbsp; there<br />')
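Taken together, the three escapespaces records pin the filter's behavior down: a single space passes through untouched, each two-space run becomes '&nbsp; ', and a newline becomes '<br />'. A minimal filter consistent with all three expectations, offered as a sketch rather than the actual djblets source:

from django import template
from django.template.defaultfilters import stringfilter

register = template.Library()


@register.filter
@stringfilter
def escapespaces(value):
    """Hypothetical sketch reproducing the expectations above."""
    return value.replace('  ', '&nbsp; ').replace('\n', '<br />')

The two replacements touch disjoint characters, so their order does not matter here.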
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_newlines(self):\n self.assertValue({\n \"foo\": \"something\\nwith\\nnewlines\",\n },\n \"foo: something_with_newlines\\n\")", "def test_code(self):\n self.assertEquals(\"\\n\\tline1\\n\\tline2\",\n trans(\"{{{\\nline1\\nline2\\n}}}\"))", "def test_with_multiple_lines(self):\n self.assertEqual(indent('foo\\nbar'),\n ' foo\\n bar')", "def test_space_at_the_end():\n line = \"n\" * 79 + \" \"\n print \"--%s--\" % wrap_line(line)\n assert wrap_line(line) == \"n\" * 79", "def test_parse_simple_quote_with_newline(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a newline (0x0a) character\")):\n api.parse_quote(\" Quote with \\n character - Author\", simple_format=True)", "def test_spaces(self):\n self.assertValue({\n 'foo bar': 'something here',\n },\n \"foo_bar: something_here\\n\")", "def test_with_single_space(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi there')", "def test_condenses_whitespace(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"data-foo\" %}\\n'\n 'some \\n\\n'\n 'value\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context()),\n '<span data-foo=\"some value\">')", "def test_with_multiple_spaces(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi&nbsp; there')", "def test_if_else_statement():\n r = convert_code(\"{if foo}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def test_yaml_newline(args, exp_newl_end, maptype):\n data = {\"randkey\": \"arbval\"}\n assert maptype(data).to_yaml(*args).endswith(\"\\n\") is exp_newl_end", "def test_with_spaceless(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" spaceless %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span><strong>\\n test123\\n </strong></span>]')", "def test_text_multiline(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(text=\"abc\\ndef\")), \":warning: **abc**\\ndef\")", "def test_assert_does_not_contain_newline(self):\n\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a newline (0x0a) character\")):\n api._assert_does_not_contain(\"There is a newline (\\n) in this string.\", \"\\n\", \"quote\")", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def format_template(template, *args):\n return textwrap.dedent(template % args).strip()", "def test_with_2_lines():\n line = \"n\" * 15 + \"\\n\" + \"n\" * 60 + \" \" + \"n\" * 10\n assert wrap_line(line) == \"n\" * 15 + \" \" + \"n\" * 60 + \"\\n\" + \"n\" * 10", "def test_with_strip(self):\n t = Template('{% load djblets_utils %}'\n '{% definevar \"myvar\" strip %}\\n'\n '<span>\\n'\n ' <strong>\\n'\n ' test{{num}}\\n'\n ' </strong>\\n'\n '</span>\\n'\n '{% enddefinevar %}'\n '[{{myvar}}]')\n\n self.assertEqual(\n t.render(Context({\n 'num': 123,\n })),\n '[<span>\\n <strong>\\n test123\\n </strong>\\n</span>]')", "def test_basic():\n line = \"test\"\n assert wrap_line(line) == \"test\"", "def test_format_linebreaks():\r\n test_cases = (\r\n ('Simple:\\n\\nLine two', '<p>Simple:</p><p>Line two</p>'),\r\n 
('DOS:\\r\\n\\r\\nLine breaks', '<p>DOS:</p><p>Line breaks</p>'),\r\n ('Classic Mac:\\r\\rLine breaks', '<p>Classic Mac:</p><p>Line breaks</p>'),\r\n ('Consecutive:\\n\\n\\n\\n\\n\\nLine breaks', '<p>Consecutive:</p><p>Line breaks</p>'),\r\n ('Multiple:\\r\\n\\r\\nLine\\r\\n\\r\\nbreaks', '<p>Multiple:</p><p>Line</p><p>breaks</p>'),\r\n ('\\nLeading and trailing\\n', '<p>Leading and trailing</p>'),\r\n ('Single\\ndoesn\\'t wrap', '<p>Single\\ndoesn\\'t wrap</p>'),\r\n ('Quote:\\n\\n<blockquote>(1) One\\n\\n(2) Two</blockquote>\\n\\nAfter',\r\n '<p>Quote:</p><blockquote><p>(1) One</p><p>(2) Two</p></blockquote><p>After</p>'),\r\n ('Quote 2:\\n\\n<blockquote>(1) One\\n\\n(2) Two\\n</blockquote>\\n\\nAfter',\r\n '<p>Quote 2:</p><blockquote><p>(1) One</p><p>(2) Two\\n</p></blockquote><p>After</p>'),\r\n )\r\n for input_text, expected_output in test_cases:\r\n yield is_equal, format_linebreaks(input_text), expected_output", "def test_empty_line_before_and_after_title(self):\n self.assertEquals(\"blahblah\\n\\n#### title\\n\\nblahblah\",\n trans(\"blahblah\\n==== title ====\\nblahblah\"))\n\n self.assertEquals(\"blahblah\\n\\n#### title\\n\\nblahblah\",\n trans(\"blahblah\\n\\n==== title ====\\n\\nblahblah\"))", "def test_newline(self):\n input = HTMLParser(StringIO(u'<span class=\"c\">a\\nb</span>'),\n encoding=None)\n expected = ['<span class=\"c\">a</span>',\n '<span class=\"c\">b</span>',\n ]\n lines = list(_group_lines(input))\n self.assertEquals(len(lines), len(expected))\n for a, b in zip(lines, expected):\n self.assertEquals(a.render('html'), b)", "def test_parse_quotes_not_three_vertical_bars(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"did not find 3 '|' characters\")):\n api.parse_quote(\" This is a quote||\", simple_format=False)", "def test():\n print 'It\\'s a test python string\\nwith a \"newline\" character.'\n print r'It\\'s a test python string\\nwith two backslashes an no \"newline\" ' \\\n r'character.'\n print '''It's a test python string\n with a \"newline\" character and leading white-space.'''\n print \"It's a test python string\\nwith a \\\"newline\\\" character.\"\n print r\"It's a test python string with two \\\"backslash\\\" characters.\"\n print \"\"\"It's a test python string\\nwith a \"newline\" character.\"\"\"", "def test_if_string_statement():\n r = convert_code(\n \"{if 'hello'}\\nbar\\n{/if}\")\n assert r == \"{% if 'hello' %}\\nbar\\n{% endif %}\"", "def test_braces_disabled():\n assert get_html(BRACES_TEXT) == \"<p>I am a {{braces}} example.</p>\"", "def test_wrap_2_words():\n w1, w2 = \"n\" * 75, \"n\" * 5\n line = \"%s %s\" % (w1, w2)\n assert wrap_line(line) == \"%s\\n%s\" % (w1, w2)", "def test_if_and_filter_statement():\n r = convert_code(\n \"{if foo and awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if foo and awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def test_double_quoted(self):\n e = yaenv.core.EnvVar('key = \"value\"\\n')\n assert e.key == 'key'\n assert e.value == 'value'\n assert e._interpolate" ]
[ "0.66250724", "0.6362418", "0.6012879", "0.5941337", "0.5929384", "0.5918962", "0.5874929", "0.5810262", "0.5764275", "0.5755598", "0.57518566", "0.57145613", "0.56923354", "0.5679629", "0.565067", "0.5621903", "0.55415255", "0.55312526", "0.55049425", "0.5420975", "0.54023916", "0.53930724", "0.537482", "0.53696513", "0.5340486", "0.53388757", "0.53333783", "0.5285362", "0.5275378", "0.5272473" ]
0.68857753
0
Testing {{...|humanize_list}} with empty list
def test_with_empty_list(self): self.assertEqual(humanize_list([]), '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_1_item(self):\n self.assertEqual(humanize_list(['a']),\n 'a')", "def test_with_2_items(self):\n self.assertEqual(humanize_list(['a', 'b']),\n 'a and b')", "def test_with_4_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c', 'd']),\n 'a, b, c, and d')", "def test_with_3_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c']),\n 'a, b and c')", "def test_list_to_string_display(self): \n test1 = list_as_text(['a', 'b', 'c', 'd', 'e'])\n self.assertEqual(test1, 'a, b, c, d and e')\n test2 = list_as_text(['Atlanta, GA', 'New York City, NY',\n 'Miami, FL'])\n self.assertEqual(test2, 'Atlanta, GA, New York City, NY and Miami, FL')\n test3 = list_as_text(['Apple a day...'])\n self.assertEqual(test3, 'Apple a day...')\n test4 = list_as_text(['love', 'hate'])\n self.assertEqual(test4, 'love and hate') \n sites = Site.objects.filter(id__in=[2, 3, 4])\n test5 = list_as_text(sites)\n self.assertEqual(test5, 'Hudson Valley, Triangle and Capital Area')", "def test_list(self):\n self.assertEquals(\"* item\", trans(\" * item\"))\n self.assertEquals(\"\\t* item\", trans(\" * item\"))\n self.assertEquals(\"\\t\\t* item\", trans(\" * item\"))", "def humanize_list(\n items: Iterable[str], conjunction: str, item_format: str = \"{!r}\"\n) -> str:\n\n if not items:\n return \"\"\n\n quoted_items = [item_format.format(item) for item in sorted(items)]\n if len(quoted_items) == 1:\n return quoted_items[0]\n\n humanized = \", \".join(quoted_items[:-1])\n\n if len(quoted_items) > 2:\n humanized += \",\"\n\n return \"{} {} {}\".format(humanized, conjunction, quoted_items[-1])", "def test_empty_list(self):\n self.assertEqual(pyperry.Base.resolve_name('ChittyChittyBangBang'), [])", "def test_empty_list(self):\n self.assertLines({\n 'hosts': [],\n 'foo': 'something',\n 'another': [1,2],\n }, [\n 'foo: something hosts: []',\n 'foo: something hosts: [] another: 1',\n 'foo: something hosts: [] another: 2',\n ])", "def process_list(a_list: list):\n\n return ', '.join(str(s) for s in a_list) if a_list else Presenter.DEFAULT", "def test_linked_list_str_format(empty_list):\n expected = 'Head: None | Length: 0'\n actual = str(empty_list)\n assert expected == actual", "def test_empty(self):\n argument = []\n expected = []\n double_preceding(argument)\n self.assertEqual(expected, argument, \"The list is empty.\")", "def test_disallow_empty_string_simple(self):\n learner = TemplateLatticeLearner(\n minimal_variables=True, allow_empty_string=False\n )\n dataset = [\"I am a human\", \"I am a nice human\", \"I am a bad human\"]\n template_tree = learner.learn(dataset)\n\n expected = TemplateTree(\n Template.from_string(\"I am a [SLOT]\"),\n [\n TemplateTree(\n Template.from_string(\"I am a [SLOT] human\"),\n [\n TemplateTree(Template.from_string(s))\n for s in [\"I am a nice human\", \"I am a bad human\"]\n ],\n ),\n TemplateTree(Template.from_string(\"I am a human\"),),\n ],\n )\n print(template_tree_visualiser.render_tree_string(template_tree))\n self.assertEqual(expected, template_tree)", "def test_empty_list(self):\n self.assertEqual(self.obj.to_json_string([]), '[]')", "def test_format_bad_tags(self):\n tags = self.c._format_tags(None)\n self.assertEqual(0, len(tags))", "def test_list(self):\n self.assertValue(\n ['foo', 'bar', 'hello'],\n 'foo\\nbar\\nhello\\n')", "def test_linked_list_repr_format(empty_list):\n expected = '<Linked List | Head: None | Length: 0>'\n actual = repr(empty_list)\n assert expected == actual", "def blank():\r\n return FormattedItem(None, '-')", "def 
test_empty_transformlist(self):\n tflist = TransformList()\n self.assertEqual(len(tflist), 0)", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = ''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def test_list_namespaced_template(self):\n pass", "def test_listOnClean(self):\n output = self.userbase('list')\n self.assertEqual(output, ['No accounts'])", "def humans(self):\n return (_ for _ in self._human_list)", "def test_list_none(self):\n self.model.objects.all().delete()\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertEquals(response.context['object_list'].count(), 0)", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def validate_list_field(field: dict, value: list):\n\n if field.get(\"required\") and len(value) == 0:\n return f\"{field.get('label')} is required!\"\n\n return \"\"", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def _clean_list(self, items):\n itemlist = list(filter(None, items))\n if len(itemlist) < 3:\n itemlist.append(\"\")\n return itemlist\n\n return itemlist", "def test_list_field():", "def test_empty_list(self):\n self.assertEqual(max_integer([]), None)" ]
[ "0.7209216", "0.6821816", "0.6803753", "0.6747265", "0.60909104", "0.6047052", "0.5889234", "0.576699", "0.5730108", "0.5727563", "0.5638082", "0.5527138", "0.54196876", "0.5410036", "0.53933257", "0.53890765", "0.53756136", "0.5358281", "0.5352746", "0.5332928", "0.5312761", "0.5306585", "0.5259354", "0.52470875", "0.52279603", "0.52244616", "0.5218621", "0.5215378", "0.5212267", "0.52088964" ]
0.84898084
0
Testing {{...|humanize_list}} with 1 item
def test_with_1_item(self): self.assertEqual(humanize_list(['a']), 'a')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_4_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c', 'd']),\n 'a, b, c, and d')", "def test_with_2_items(self):\n self.assertEqual(humanize_list(['a', 'b']),\n 'a and b')", "def test_with_3_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c']),\n 'a, b and c')", "def humanize_list(\n items: Iterable[str], conjunction: str, item_format: str = \"{!r}\"\n) -> str:\n\n if not items:\n return \"\"\n\n quoted_items = [item_format.format(item) for item in sorted(items)]\n if len(quoted_items) == 1:\n return quoted_items[0]\n\n humanized = \", \".join(quoted_items[:-1])\n\n if len(quoted_items) > 2:\n humanized += \",\"\n\n return \"{} {} {}\".format(humanized, conjunction, quoted_items[-1])", "def test_with_empty_list(self):\n self.assertEqual(humanize_list([]),\n '')", "def test_list_to_string_display(self): \n test1 = list_as_text(['a', 'b', 'c', 'd', 'e'])\n self.assertEqual(test1, 'a, b, c, d and e')\n test2 = list_as_text(['Atlanta, GA', 'New York City, NY',\n 'Miami, FL'])\n self.assertEqual(test2, 'Atlanta, GA, New York City, NY and Miami, FL')\n test3 = list_as_text(['Apple a day...'])\n self.assertEqual(test3, 'Apple a day...')\n test4 = list_as_text(['love', 'hate'])\n self.assertEqual(test4, 'love and hate') \n sites = Site.objects.filter(id__in=[2, 3, 4])\n test5 = list_as_text(sites)\n self.assertEqual(test5, 'Hudson Valley, Triangle and Capital Area')", "def test_list(self):\n self.assertEquals(\"* item\", trans(\" * item\"))\n self.assertEquals(\"\\t* item\", trans(\" * item\"))\n self.assertEquals(\"\\t\\t* item\", trans(\" * item\"))", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n stri= item.theuv.nom + \" - \" + item.nom\r\n return u\"<span class='tag'>%s</span>\" % stri", "def test_link_in_list(self):\n self.assertEquals(\"* [name](name)\\n* name2\\n* name3\",\n trans(\" * [name]\\n * name2\\n * name3\"))", "def format_list_item(entry, index):\n terminal_width = shutil.get_terminal_size().columns\n wrap_width = terminal_width - len(entry.published) - 1\n heading = str(index) + ': ' + entry.title\n wrapped_heading = textwrap.wrap(heading, wrap_width)\n padding = terminal_width - len(wrapped_heading[0] + entry.published)\n if has_been_read(entry):\n return (\n wrapped_heading[0] +\n ' ' * (padding) +\n entry.published +\n '\\n'.join(wrapped_heading[1:])\n )\n else:\n return (\n BOLD +\n wrapped_heading[0] +\n CLEAR +\n ' ' * (padding) +\n entry.published +\n BOLD +\n '\\n'.join(wrapped_heading[1:]) +\n CLEAR\n )", "def items_to_report_element(items, item_type):\r\n def pluralize(x):\r\n if x.endswith('s'):\r\n return x + 'es'\r\n else:\r\n return x + 's'\r\n\r\n items = [str(x) for x in items]\r\n n = len(items)\r\n text = '%d %s' % (n, item_type if n == 1 else pluralize(item_type))\r\n if n == 0:\r\n return text\r\n else:\r\n detail = '\\n'.join(items)\r\n return text, detail", "def process_list(a_list: list):\n\n return ', '.join(str(s) for s in a_list) if a_list else Presenter.DEFAULT", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, 
out=buf)\n return buf.getvalue()", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def pluralisation(self, plural):\n return \"item\"", "def __str__(self):\n if self.item_info:\n item_type = self.item_info['type'](self.item_info)\n\n return gettext('List of %s') % item_type\n else:\n return gettext('List')", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.filter", "def print_all_items_in_dict_for_human(all_items):\n # Find the length of the longest item.\n longest_item = 0\n for item in all_items:\n item_length = len(f\"{item}\")\n if item_length > longest_item:\n longest_item = item_length\n\n for item in sorted(all_items):\n print(f\"{item}\".rjust(longest_item) + f\": {all_items[item]}\")", "def create_menu_text(list_of_items):\n ret = \"\"\n for item in list_of_items:\n item = clean(item)\n ret += item + \"\\n\"\n # translate = gs.translate(item, 'en', 'de')\n # ret += \"_\" + translate.replace(\" , \", \", \") + \"_\\n\"\n # ret += \"\\n\"\n return ret[:-1] # ignore last newline", "def list_item_html(text: str) -> str:\n return \"<li>{}</li>\".format(text)", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = ''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def large_list_display(keyval: str, record: dict, title: str):\n if keyval in record:\n if len(record[keyval]):\n res = \", \".join(t[\"value\"].title() for t in record[keyval])\n res = f\"{chunk_long_description(res)}\"\n res = f\"{colored(title, attrs=['bold','underline'])}\\n{res}\"\n print(f\"{res}\\n\")", "def test_display_names(self):\r\n names = [\r\n ('correct', u'correct'),\r\n ('incorrect', u'incorrect'),\r\n ('incomplete', u'incomplete'),\r\n ('unanswered', u'unanswered'),\r\n ('unsubmitted', u'unanswered'),\r\n ('queued', u'processing'),\r\n ('dave', u'dave'),\r\n ]\r\n for status, display_name in names:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.display_name, display_name)", "def test_list(self):\n self.assertValue(\n ['foo', 'bar', 'hello'],\n 'foo\\nbar\\nhello\\n')", "def test_get_passage_with_list(self):\n simple = self.TEI.getPassage([\"1\", \"pr\", \"2\"])\n self.assertEqual(\n simple.text().strip(),\n \"tum, ut de illis queri non possit quisquis de se bene\",\n \"Ensure passage finding with context is fully TEI / Capitains compliant (Different level range Passage)\"\n )", "def test_list_namespaced_template(self):\n pass" ]
[ "0.74867344", "0.7485351", "0.7454677", "0.6828147", "0.67824715", "0.6465609", "0.63791704", "0.58145946", "0.58145946", "0.58145946", "0.58145946", "0.5766288", "0.5729722", "0.572213", "0.57021636", "0.56853616", "0.56729215", "0.5652388", "0.5632467", "0.56143266", "0.55581284", "0.555304", "0.55205864", "0.5507845", "0.5491941", "0.5433869", "0.5394551", "0.5349249", "0.53334147", "0.53234166" ]
0.8221179
0
Testing {{...|humanize_list}} with 2 items
def test_with_2_items(self): self.assertEqual(humanize_list(['a', 'b']), 'a and b')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_1_item(self):\n self.assertEqual(humanize_list(['a']),\n 'a')", "def test_with_3_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c']),\n 'a, b and c')", "def test_with_4_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c', 'd']),\n 'a, b, c, and d')", "def humanize_list(\n items: Iterable[str], conjunction: str, item_format: str = \"{!r}\"\n) -> str:\n\n if not items:\n return \"\"\n\n quoted_items = [item_format.format(item) for item in sorted(items)]\n if len(quoted_items) == 1:\n return quoted_items[0]\n\n humanized = \", \".join(quoted_items[:-1])\n\n if len(quoted_items) > 2:\n humanized += \",\"\n\n return \"{} {} {}\".format(humanized, conjunction, quoted_items[-1])", "def test_list_to_string_display(self): \n test1 = list_as_text(['a', 'b', 'c', 'd', 'e'])\n self.assertEqual(test1, 'a, b, c, d and e')\n test2 = list_as_text(['Atlanta, GA', 'New York City, NY',\n 'Miami, FL'])\n self.assertEqual(test2, 'Atlanta, GA, New York City, NY and Miami, FL')\n test3 = list_as_text(['Apple a day...'])\n self.assertEqual(test3, 'Apple a day...')\n test4 = list_as_text(['love', 'hate'])\n self.assertEqual(test4, 'love and hate') \n sites = Site.objects.filter(id__in=[2, 3, 4])\n test5 = list_as_text(sites)\n self.assertEqual(test5, 'Hudson Valley, Triangle and Capital Area')", "def test_list(self):\n self.assertEquals(\"* item\", trans(\" * item\"))\n self.assertEquals(\"\\t* item\", trans(\" * item\"))\n self.assertEquals(\"\\t\\t* item\", trans(\" * item\"))", "def test_with_empty_list(self):\n self.assertEqual(humanize_list([]),\n '')", "def items_to_report_element(items, item_type):\r\n def pluralize(x):\r\n if x.endswith('s'):\r\n return x + 'es'\r\n else:\r\n return x + 's'\r\n\r\n items = [str(x) for x in items]\r\n n = len(items)\r\n text = '%d %s' % (n, item_type if n == 1 else pluralize(item_type))\r\n if n == 0:\r\n return text\r\n else:\r\n detail = '\\n'.join(items)\r\n return text, detail", "def format_item_display(self, item):\r\n stri= item.theuv.nom + \" - \" + item.nom\r\n return u\"<span class='tag'>%s</span>\" % stri", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def test_link_in_list(self):\n self.assertEquals(\"* [name](name)\\n* name2\\n* name3\",\n trans(\" * [name]\\n * name2\\n * name3\"))", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def print_all_items_in_dict_for_human(all_items):\n # Find the length of the longest item.\n longest_item = 0\n for item in all_items:\n item_length = len(f\"{item}\")\n if item_length > longest_item:\n longest_item = item_length\n\n for item in sorted(all_items):\n print(f\"{item}\".rjust(longest_item) + f\": {all_items[item]}\")", "def pluralisation(self, plural):\n return \"item\"", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = 
''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def large_list_display(keyval: str, record: dict, title: str):\n if keyval in record:\n if len(record[keyval]):\n res = \", \".join(t[\"value\"].title() for t in record[keyval])\n res = f\"{chunk_long_description(res)}\"\n res = f\"{colored(title, attrs=['bold','underline'])}\\n{res}\"\n print(f\"{res}\\n\")", "def format_list_item(entry, index):\n terminal_width = shutil.get_terminal_size().columns\n wrap_width = terminal_width - len(entry.published) - 1\n heading = str(index) + ': ' + entry.title\n wrapped_heading = textwrap.wrap(heading, wrap_width)\n padding = terminal_width - len(wrapped_heading[0] + entry.published)\n if has_been_read(entry):\n return (\n wrapped_heading[0] +\n ' ' * (padding) +\n entry.published +\n '\\n'.join(wrapped_heading[1:])\n )\n else:\n return (\n BOLD +\n wrapped_heading[0] +\n CLEAR +\n ' ' * (padding) +\n entry.published +\n BOLD +\n '\\n'.join(wrapped_heading[1:]) +\n CLEAR\n )", "def unordered_list_html(list_items: List[str]) -> str:\n return \"<ul>{}</ul>\".format(\"\".join(list_items))", "def test_list(self):\n self.assertValue(\n ['foo', 'bar', 'hello'],\n 'foo\\nbar\\nhello\\n')", "def create_menu_text(list_of_items):\n ret = \"\"\n for item in list_of_items:\n item = clean(item)\n ret += item + \"\\n\"\n # translate = gs.translate(item, 'en', 'de')\n # ret += \"_\" + translate.replace(\" , \", \", \") + \"_\\n\"\n # ret += \"\\n\"\n return ret[:-1] # ignore last newline", "def test_reformat_paragraph_list_2_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"4.0\", \"4.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.filter", "def list_item_html(text: str) -> str:\n return \"<li>{}</li>\".format(text)", "def test_reformat_paragraph_list_1_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is the \"lead\"\n paragraph.\n\n 1. This is item \n number 1. It is the first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item \n number 1. It is the first item in the list.\n\n 2. This is item \n number 2. 
It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"4.0\", \"4.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))", "def show_list(self, desc, lst, writeln):\n if not lst:\n return\n val = ', '.join([list_escape(v) for v in lst])\n writeln(\"%s: %s\" % (desc, val))" ]
[ "0.7770209", "0.7701434", "0.7694392", "0.67632717", "0.65721804", "0.6350669", "0.63303834", "0.57339954", "0.5732924", "0.5719008", "0.5717518", "0.56404835", "0.56404835", "0.56404835", "0.56404835", "0.562351", "0.5592338", "0.5560638", "0.5533705", "0.5500018", "0.5478888", "0.54771566", "0.5467121", "0.5458818", "0.54543865", "0.5433416", "0.5412882", "0.5373814", "0.53564984", "0.53472203" ]
0.7984499
0
Testing {{...|humanize_list}} with 3 items
def test_with_3_items(self): self.assertEqual(humanize_list(['a', 'b', 'c']), 'a, b and c')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_4_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c', 'd']),\n 'a, b, c, and d')", "def test_with_1_item(self):\n self.assertEqual(humanize_list(['a']),\n 'a')", "def test_with_2_items(self):\n self.assertEqual(humanize_list(['a', 'b']),\n 'a and b')", "def humanize_list(\n items: Iterable[str], conjunction: str, item_format: str = \"{!r}\"\n) -> str:\n\n if not items:\n return \"\"\n\n quoted_items = [item_format.format(item) for item in sorted(items)]\n if len(quoted_items) == 1:\n return quoted_items[0]\n\n humanized = \", \".join(quoted_items[:-1])\n\n if len(quoted_items) > 2:\n humanized += \",\"\n\n return \"{} {} {}\".format(humanized, conjunction, quoted_items[-1])", "def test_list_to_string_display(self): \n test1 = list_as_text(['a', 'b', 'c', 'd', 'e'])\n self.assertEqual(test1, 'a, b, c, d and e')\n test2 = list_as_text(['Atlanta, GA', 'New York City, NY',\n 'Miami, FL'])\n self.assertEqual(test2, 'Atlanta, GA, New York City, NY and Miami, FL')\n test3 = list_as_text(['Apple a day...'])\n self.assertEqual(test3, 'Apple a day...')\n test4 = list_as_text(['love', 'hate'])\n self.assertEqual(test4, 'love and hate') \n sites = Site.objects.filter(id__in=[2, 3, 4])\n test5 = list_as_text(sites)\n self.assertEqual(test5, 'Hudson Valley, Triangle and Capital Area')", "def test_list(self):\n self.assertEquals(\"* item\", trans(\" * item\"))\n self.assertEquals(\"\\t* item\", trans(\" * item\"))\n self.assertEquals(\"\\t\\t* item\", trans(\" * item\"))", "def test_with_empty_list(self):\n self.assertEqual(humanize_list([]),\n '')", "def create_menu_text(list_of_items):\n ret = \"\"\n for item in list_of_items:\n item = clean(item)\n ret += item + \"\\n\"\n # translate = gs.translate(item, 'en', 'de')\n # ret += \"_\" + translate.replace(\" , \", \", \") + \"_\\n\"\n # ret += \"\\n\"\n return ret[:-1] # ignore last newline", "def print_all_items_in_dict_for_human(all_items):\n # Find the length of the longest item.\n longest_item = 0\n for item in all_items:\n item_length = len(f\"{item}\")\n if item_length > longest_item:\n longest_item = item_length\n\n for item in sorted(all_items):\n print(f\"{item}\".rjust(longest_item) + f\": {all_items[item]}\")", "def format_item_display(self, item):\r\n stri= item.theuv.nom + \" - \" + item.nom\r\n return u\"<span class='tag'>%s</span>\" % stri", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def show_item_list():\n # 3 items per line\n line = []\n linecounter = 0\n item_string = \"\"\n counter = 0\n text_spacer = 20\n clear_messages(0)\n\n for i in range(0, len(ITEMS)):\n space = text_spacer - len(ITEMS[i])\n item_string = item_string + ITEMS[i] + (' ' * space)\n counter += 1\n if counter == 3:\n line.append(item_string)\n linecounter += 1\n item_string = \"\"\n counter = 0\n if counter < 3:\n line.append(item_string)\n\n for i in range(0, linecounter + 1):\n printmessage(line[i], i + 1, MAGENTA, 0)\n clear_messages(3)", "def items_to_report_element(items, item_type):\r\n def pluralize(x):\r\n if x.endswith('s'):\r\n return x + 'es'\r\n else:\r\n return x + 's'\r\n\r\n items = [str(x) for x in items]\r\n n = len(items)\r\n text = '%d %s' % (n, item_type if n == 1 else pluralize(item_type))\r\n if n == 0:\r\n return text\r\n else:\r\n detail = '\\n'.join(items)\r\n return text, detail", "def test_link_in_list(self):\n self.assertEquals(\"* 
[name](name)\\n* name2\\n* name3\",\n trans(\" * [name]\\n * name2\\n * name3\"))", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def large_list_display(keyval: str, record: dict, title: str):\n if keyval in record:\n if len(record[keyval]):\n res = \", \".join(t[\"value\"].title() for t in record[keyval])\n res = f\"{chunk_long_description(res)}\"\n res = f\"{colored(title, attrs=['bold','underline'])}\\n{res}\"\n print(f\"{res}\\n\")", "def format_list_item(entry, index):\n terminal_width = shutil.get_terminal_size().columns\n wrap_width = terminal_width - len(entry.published) - 1\n heading = str(index) + ': ' + entry.title\n wrapped_heading = textwrap.wrap(heading, wrap_width)\n padding = terminal_width - len(wrapped_heading[0] + entry.published)\n if has_been_read(entry):\n return (\n wrapped_heading[0] +\n ' ' * (padding) +\n entry.published +\n '\\n'.join(wrapped_heading[1:])\n )\n else:\n return (\n BOLD +\n wrapped_heading[0] +\n CLEAR +\n ' ' * (padding) +\n entry.published +\n BOLD +\n '\\n'.join(wrapped_heading[1:]) +\n CLEAR\n )", "def list_item_html(text: str) -> str:\n return \"<li>{}</li>\".format(text)", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = ''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def unordered_list_html(list_items: List[str]) -> str:\n return \"<ul>{}</ul>\".format(\"\".join(list_items))", "def _make_song_list_html(song_list):\n return '<p class=\"song_name\">' + '<br>'.join([f'{song[\"title\"]} <span class=\"artist_album\">{song[\"artist\"]} - {song[\"album\"]}</span>' for song in song_list]) + '</p>'", "def view_human_priority(unused1, unused2, model, unused3):\n del unused1, unused2, unused3\n return Markup(u\"%s\" % (model.priority_human)) if model else u\"\"", "def test_reformat_paragraph_list_3_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. 
It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"10.0\", \"10.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def list_to_text(ingridients_list):\n to_return = \"List\\n\"\n for (ingridient, quantity) in ingridients_list:\n to_return = f\"{to_return}{ingridient.name} {quantity}\\n\"\n return to_return", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.filter", "def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))" ]
[ "0.7959641", "0.76548904", "0.7512626", "0.6840446", "0.6606152", "0.6386993", "0.6284902", "0.59038913", "0.5873438", "0.5778882", "0.57520056", "0.5720628", "0.56403565", "0.5628272", "0.561915", "0.561915", "0.561915", "0.561915", "0.5571809", "0.5539878", "0.55234474", "0.5517387", "0.5474973", "0.54695565", "0.54602486", "0.5435363", "0.5434885", "0.5413701", "0.5410008", "0.53891104" ]
0.8175464
0
Testing {{...|humanize_list}} with 4 items
def test_with_4_items(self): self.assertEqual(humanize_list(['a', 'b', 'c', 'd']), 'a, b, c, and d')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_3_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c']),\n 'a, b and c')", "def test_with_1_item(self):\n self.assertEqual(humanize_list(['a']),\n 'a')", "def test_with_2_items(self):\n self.assertEqual(humanize_list(['a', 'b']),\n 'a and b')", "def test_list_to_string_display(self): \n test1 = list_as_text(['a', 'b', 'c', 'd', 'e'])\n self.assertEqual(test1, 'a, b, c, d and e')\n test2 = list_as_text(['Atlanta, GA', 'New York City, NY',\n 'Miami, FL'])\n self.assertEqual(test2, 'Atlanta, GA, New York City, NY and Miami, FL')\n test3 = list_as_text(['Apple a day...'])\n self.assertEqual(test3, 'Apple a day...')\n test4 = list_as_text(['love', 'hate'])\n self.assertEqual(test4, 'love and hate') \n sites = Site.objects.filter(id__in=[2, 3, 4])\n test5 = list_as_text(sites)\n self.assertEqual(test5, 'Hudson Valley, Triangle and Capital Area')", "def humanize_list(\n items: Iterable[str], conjunction: str, item_format: str = \"{!r}\"\n) -> str:\n\n if not items:\n return \"\"\n\n quoted_items = [item_format.format(item) for item in sorted(items)]\n if len(quoted_items) == 1:\n return quoted_items[0]\n\n humanized = \", \".join(quoted_items[:-1])\n\n if len(quoted_items) > 2:\n humanized += \",\"\n\n return \"{} {} {}\".format(humanized, conjunction, quoted_items[-1])", "def test_with_empty_list(self):\n self.assertEqual(humanize_list([]),\n '')", "def test_list(self):\n self.assertEquals(\"* item\", trans(\" * item\"))\n self.assertEquals(\"\\t* item\", trans(\" * item\"))\n self.assertEquals(\"\\t\\t* item\", trans(\" * item\"))", "def print_all_items_in_dict_for_human(all_items):\n # Find the length of the longest item.\n longest_item = 0\n for item in all_items:\n item_length = len(f\"{item}\")\n if item_length > longest_item:\n longest_item = item_length\n\n for item in sorted(all_items):\n print(f\"{item}\".rjust(longest_item) + f\": {all_items[item]}\")", "def create_menu_text(list_of_items):\n ret = \"\"\n for item in list_of_items:\n item = clean(item)\n ret += item + \"\\n\"\n # translate = gs.translate(item, 'en', 'de')\n # ret += \"_\" + translate.replace(\" , \", \", \") + \"_\\n\"\n # ret += \"\\n\"\n return ret[:-1] # ignore last newline", "def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))", "def test_link_in_list(self):\n self.assertEquals(\"* [name](name)\\n* name2\\n* name3\",\n trans(\" * [name]\\n * name2\\n * name3\"))", "def show_item_list():\n # 3 items per line\n line = []\n linecounter = 0\n item_string = \"\"\n counter = 0\n text_spacer = 20\n clear_messages(0)\n\n for i in range(0, len(ITEMS)):\n space = text_spacer - len(ITEMS[i])\n item_string = item_string + ITEMS[i] + (' ' * space)\n counter += 1\n if counter == 3:\n line.append(item_string)\n linecounter += 1\n item_string = \"\"\n counter = 0\n if counter < 3:\n line.append(item_string)\n\n for i in range(0, linecounter + 1):\n printmessage(line[i], i + 1, MAGENTA, 0)\n clear_messages(3)", "def test_reformat_paragraph_list_4_of_5(self):\n 
before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item number 3. It is the\n third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"10.0\", \"10.0\"),\n after_sel=(\"13.0\", \"13.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def humanise_list(lst):\n assert len(lst) > 0\n if len(lst) == 1:\n return lst[0]\n head = \", \".join(lst[:-1])\n tail = lst[-1]\n return f\"{head} and {tail}\"", "def items_to_report_element(items, item_type):\r\n def pluralize(x):\r\n if x.endswith('s'):\r\n return x + 'es'\r\n else:\r\n return x + 's'\r\n\r\n items = [str(x) for x in items]\r\n n = len(items)\r\n text = '%d %s' % (n, item_type if n == 1 else pluralize(item_type))\r\n if n == 0:\r\n return text\r\n else:\r\n detail = '\\n'.join(items)\r\n return text, detail", "def format_item_display(self, item):\r\n stri= item.theuv.nom + \" - \" + item.nom\r\n return u\"<span class='tag'>%s</span>\" % stri", "def test_reformat_paragraph_list_2_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"4.0\", \"4.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_reformat_paragraph_list_5_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item number 3. It is the\n third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item number 3. It is the\n third item in the list.\n\n This paragraph ends the test. 
It is the\n \"final\" paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"13.0\", \"13.0\"),\n after_sel=(\"15.1\", \"15.1\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_reformat_paragraph_list_1_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is the \"lead\"\n paragraph.\n\n 1. This is item \n number 1. It is the first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item \n number 1. It is the first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"4.0\", \"4.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def large_list_display(keyval: str, record: dict, title: str):\n if keyval in record:\n if len(record[keyval]):\n res = \", \".join(t[\"value\"].title() for t in record[keyval])\n res = f\"{chunk_long_description(res)}\"\n res = f\"{colored(title, attrs=['bold','underline'])}\\n{res}\"\n print(f\"{res}\\n\")", "def human_list(lst, connector='and'):\n # we don't want to listify non iterables\n if not getattr(lst, '__iter__', False):\n return lst\n else:\n s = ''\n max_idx = len(lst) - 1\n for i, item in enumerate(lst):\n if i == 0:\n t = '%s'\n elif i == max_idx and max_idx > 1:\n t = ', ' + connector + ' %s'\n elif i == max_idx and max_idx == 1:\n t = ' ' + connector + ' %s'\n else:\n t = ', %s'\n s += t % filter.conditional_escape(item)\n return mark_safe(s)", "def test_reformat_paragraph_list_3_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item \n number 2. It is the second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item \n number 3. It is the third item in the list.\n\n This paragraph ends the test. 
It is the \"final\"\n paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"10.0\", \"10.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def unordered_list_html(list_items: List[str]) -> str:\n return \"<ul>{}</ul>\".format(\"\".join(list_items))", "def _make_song_list_html(song_list):\n return '<p class=\"song_name\">' + '<br>'.join([f'{song[\"title\"]} <span class=\"artist_album\">{song[\"artist\"]} - {song[\"album\"]}</span>' for song in song_list]) + '</p>'", "def process_list(a_list: list):\n\n return ', '.join(str(s) for s in a_list) if a_list else Presenter.DEFAULT", "def print_list(self):\n self.print_avec_separateur(\" \")", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom", "def format_item_display(self, item):\r\n return u\"<span class='tag'>%s</span>\" % item.nom" ]
[ "0.7818746", "0.75763154", "0.73760206", "0.6708089", "0.6705747", "0.63406163", "0.63235223", "0.59415454", "0.57278186", "0.56521595", "0.56213796", "0.5577789", "0.5576578", "0.5571174", "0.55692714", "0.55645466", "0.5553006", "0.5546997", "0.5542719", "0.554077", "0.5516904", "0.5513699", "0.5512578", "0.5504654", "0.54822016", "0.5477821", "0.5435514", "0.543475", "0.543475", "0.543475" ]
0.8308927
0
Testing {% include_as_string %}
def test_basic_usage(self): t = Template('{% load djblets_utils %}' '{% include_as_string template_name %}') self.assertEqual( t.render(Context({ 'template_name': 'testing/foo.html', 'foo': 1, 'bar': 2, })), "'1 2\\\n'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def include_string(parser, token):\n\tbits = token.split_contents()\n\tif len(bits) != 2:\n\t\traise TemplateSyntaxError(\"%r tag takes one argument: the template string to be included\" % bits[0])\n \tstring = parser.compile_filter(bits[1])\n\treturn IncludeStringNode(string)", "def test_includes(self):\n collection = lookup.TemplateLookup()\n\n collection.put_string(\n \"base\",\n \"\"\"\n <%def name=\"a()\">base_a</%def>\n This is the base.\n ${next.body()}\n End base.\n\"\"\",\n )\n\n collection.put_string(\n \"index\",\n \"\"\"\n <%inherit file=\"base\"/>\n this is index.\n a is: ${self.a()}\n <%include file=\"secondary\"/>\n\"\"\",\n )\n\n collection.put_string(\n \"secondary\",\n \"\"\"\n <%inherit file=\"base\"/>\n this is secondary.\n a is: ${self.a()}\n\"\"\",\n )\n\n assert result_lines(collection.get_template(\"index\").render()) == [\n \"This is the base.\",\n \"this is index.\",\n \"a is: base_a\",\n \"This is the base.\",\n \"this is secondary.\",\n \"a is: base_a\",\n \"End base.\",\n \"End base.\",\n ]", "def include_file(ctx, name):\n env = ctx.environment\n return jinja2.Markup(env.loader.get_source(env, name)[0])", "def django_template_include(file_name, mako_context):\r\n\r\n dictionary = dict(mako_context)\r\n return loader.render_to_string(file_name, dictionary=dictionary)", "def test_insert_file_tag(self): \n content = \"Here is an included file: <toplevelcontent> {% insert_file public_html/fakeinclude.html %}</toplevelcontent>\" \n insertfiletagpage = create_page_in_admin(self.testproject,\"testincludefiletagpage\",content)\n \n response = self._test_page_can_be_viewed(self.signedup_user,insertfiletagpage)\n \n \n # Extract rendered content from included file, see if it has been rendered\n # In the correct way\n somecss = find_text_between('<somecss>','</somecss>',response.content)\n nonexistant = find_text_between('<nonexistant>','</nonexistant>',response.content)\n scary = find_text_between('<scary>','</scary>',response.content)\n \n self.assertTrue(somecss != \"\",\"Nothing was rendered when including an existing file. Some css should be here\")\n self.assertTrue(nonexistant != \"\",\"Nothing was rendered when including an existing file. Some css should be here\")\n self.assertTrue(scary != \"\",\"Nothing was rendered when trying to go up the directory tree with ../ At least some error should be printed\")\n \n self.assertTrue(\"body {width:300px;}\" in somecss,\"Did not find expected\"\n \" content 'body {width:300px;}' when including a test\"\n \" css file. Instead found '%s'\" % somecss)\n self.assertTrue(\"Error including file\" in nonexistant,\"Expected a\"\n \" message 'Error including file' when including \"\n \"non-existant file. Instead found '%s'\" % nonexistant)\n self.assertTrue(\"Error including file\" in scary ,\n \"Expected a message 'Error including file' when trying to include filepath with ../\"\n \" in it. 
Instead found '%s'\" %scary)", "def include(self, name, **kwargs):\n with open(osp.join(self.src, name), \"rt\") as fp:\n template = fp.read()\n try:\n html = Template(template).render(\n **kwargs, include=self.include, copy=self.copy, link=self.link\n )\n except Exception:\n print(exceptions.text_error_template().render())\n raise\n return html", "def test_include(self):\n self.assertEqual([\"include\", \"\\\"test.sql\\\"\"],\n grammar._INCLUDE_FILE.parseString(\"#include \\\"test.sql\\\"\").asList())", "def IncludeString(name):\n include_string = \"#include \" + name + \"\\n\"\n\n return include_string", "def include_external_file(ctx, name):\n with open(os.path.abspath(name), \"r\") as f:\n content = f.read()\n return jinja2.Markup(content)", "def default_myst_include(root_file: str) -> str: # noqa\n template_include = dedent(\n \"\"\"\\\n ```{include} ../{root_file}\n :relative-docs: docs/\n :relative-images:\n ```\n \"\"\"\n )\n return template_include.replace(\"{root_file}\", root_file)", "def test_simple(self):\n self.assertRaises(template.TemplateSyntaxError, render, '{% go %}')\n self.assertEqual(render('{% go using \"the_flow.html\" %}'), 'yeah')", "def test_simple(self):\n self.assertEqual(render('{% fish_as as out %}-{{ out }}'), '-fish')\n self.assertEqual(render('{% another_fish_as as out %}-{{ out }}'), '-fish')", "def render_template():\n template_engine = engines['django']\n def func(template_string):\n load_tags_string = '{% load wagtailextensions_tags %}'\n return template_engine.from_string(load_tags_string + template_string).render()\n return func", "def _inline_example( name: str, lines: str = None, image = True ):\n \n result = \"\"\n\n if image:\n result += f\"\"\"\n\n.. only:: html\n\n .. image:: ../examples/images/{name}_html.png\n\n.. only:: pdf\n\n .. image:: ../examples/images/{name}_pdf.png\n\n\"\"\"\n\n if lines is not None:\n result += f\"\"\"\n\n.. literalinclude:: ../examples/{name}.py\n :lines: {lines}\n\n\"\"\"\n return result", "def test_include_template(self, parse_input, tmpdir):\n template = textwrap.dedent(\n \"\"\"\n name CustomOperation\n version 0.0\n BSgate({theta}, pi/2) | [0, 1]\n Rgate({phi}) | 0\n \"\"\"\n )\n\n filename = tmpdir.join(\"test.xbb\")\n\n with open(filename, \"w\") as f:\n f.write(template)\n\n test_include = textwrap.dedent(\n \"\"\"\n name test_include\n version 0.0\n include \"{}\"\n CustomOperation(theta=0.54, phi=0.1) | [2, 1]\n \"\"\"\n ).format(filename)\n\n bb = parse_input(test_include, cwd=tmpdir)\n\n expected = [\n {\"op\": \"BSgate\", \"args\": [0.54, np.pi / 2], \"kwargs\": {}, \"modes\": [2, 1]},\n {\"op\": \"Rgate\", \"args\": [0.1], \"kwargs\": {}, \"modes\": [2]},\n ]\n\n assert bb.operations == expected", "def test_template(project):\n project.add_mock_file(\"templates\", \"test.tmpl\", \"{{ value }}\")\n project.compile(\"\"\"import unittest\nvalue = \"1234\"\nstd::print(std::template(\"unittest/test.tmpl\"))\n \"\"\")\n\n assert project.get_stdout() == \"1234\\n\"", "def test_rendering_includes(self):\n self.run_function(\"state.sls\", [\"pydsl.aaa\"])\n\n expected = textwrap.dedent(\n \"\"\"\\\n X1\n X2\n X3\n Y1 extended\n Y2 extended\n Y3\n hello red 1\n hello green 2\n hello blue 3\n \"\"\"\n )\n\n # Windows adds `linefeed` in addition to `newline`. 
There's also an\n # unexplainable space before the `linefeed`...\n if salt.utils.platform.is_windows():\n expected = (\n \"X1 \\r\\n\"\n \"X2 \\r\\n\"\n \"X3 \\r\\n\"\n \"Y1 extended \\r\\n\"\n \"Y2 extended \\r\\n\"\n \"Y3 \\r\\n\"\n \"hello red 1 \\r\\n\"\n \"hello green 2 \\r\\n\"\n \"hello blue 3 \\r\\n\"\n )\n\n try:\n with salt.utils.files.fopen(\"/tmp/output\", \"r\") as f:\n ret = salt.utils.stringutils.to_unicode(f.read())\n finally:\n os.remove(\"/tmp/output\")\n\n self.assertEqual(sorted(ret), sorted(expected))", "def test_theme_template_loading_by_prefix():\n app = create_ctfd()\n with app.test_request_context():\n tpl1 = render_template_string(\"{% extends 'core/page.html' %}\", content=\"test\")\n tpl2 = render_template(\"page.html\", content=\"test\")\n assert tpl1 == tpl2", "def test_raw_static_check():\r\n path = '\"/static/foo.png?raw\"'\r\n assert_equals(path, replace_static_urls(path, DATA_DIRECTORY))\r\n\r\n text = 'text <tag a=\"/static/js/capa/protex/protex.nocache.js?raw\"/><div class=\"'\r\n assert_equals(path, replace_static_urls(path, text))", "def render_inclusion(func, file_name, takes_context, django_context, *args, **kwargs):\r\n\r\n if takes_context:\r\n args = [django_context] + list(args)\r\n\r\n _dict = func(*args, **kwargs)\r\n if isinstance(file_name, Template):\r\n t = file_name\r\n elif not isinstance(file_name, basestring) and is_iterable(file_name):\r\n t = select_template(file_name)\r\n else:\r\n t = get_template(file_name)\r\n\r\n nodelist = t.nodelist\r\n\r\n new_context = Context(_dict)\r\n csrf_token = django_context.get('csrf_token', None)\r\n if csrf_token is not None:\r\n new_context['csrf_token'] = csrf_token\r\n\r\n return nodelist.render(new_context)", "def test_basic(self):\n template = get_template('basic.html')\n context = Context({'eggs': 'Sausage'})\n if django.VERSION >= (1, 8):\n context = context.flatten()\n rendered = template.render(context)\n self.assert_expected(rendered, 'basic.expected.html')", "def run(self):\n\n # from sphynx Include Directive in https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/other.py\n # type: () -> List[nodes.Node]\n env = self.state.document.settings.env\n if self.arguments[0].startswith('<') and \\\n self.arguments[0].endswith('>'):\n # docutils \"standard\" includes, do not do path processing\n return BaseInclude.run(self)\n rel_filename, filename = env.relfn2path(self.arguments[0])\n self.arguments[0] = filename\n env.note_included(filename)\n #end\n\n if not self.state.document.settings.file_insertion_enabled:\n raise self.warning('\"%s\" directive disabled.' 
% self.name)\n source = self.state_machine.input_lines.source(\n self.lineno - self.state_machine.input_offset - 1)\n source_dir = os.path.dirname(os.path.abspath(source))\n path = directives.path(self.arguments[0])\n if path.startswith('<') and path.endswith('>'):\n path = os.path.join(self.standard_include_path, path[1:-1])\n path = os.path.normpath(os.path.join(source_dir, path))\n path = utils.relative_path(None, path)\n path = nodes.reprunicode(path)\n encoding = self.options.get(\n 'encoding', self.state.document.settings.input_encoding)\n e_handler=self.state.document.settings.input_encoding_error_handler\n tab_width = self.options.get(\n 'tab-width', self.state.document.settings.tab_width)\n try:\n self.state.document.settings.record_dependencies.add(path)\n include_file = io.FileInput(source_path=path,\n encoding=encoding,\n error_handler=e_handler)\n except UnicodeEncodeError as error:\n raise self.severe(u'Problems with \"%s\" directive path:\\n'\n 'Cannot encode input file path \"%s\" '\n '(wrong locale?).' %\n (self.name, SafeString(path)))\n except IOError as error:\n raise self.severe(u'Problems with \"%s\" directive path:\\n%s.' %\n (self.name, ErrorString(error)))\n startline = self.options.get('start-line', None)\n endline = self.options.get('end-line', None)\n try:\n if startline or (endline is not None):\n lines = include_file.readlines()\n rawtext = ''.join(lines[startline:endline])\n else:\n rawtext = include_file.read()\n except UnicodeError as error:\n raise self.severe(u'Problem with \"%s\" directive:\\n%s' %\n (self.name, ErrorString(error)))\n # start-after/end-before: no restrictions on newlines in match-text,\n # and no restrictions on matching inside lines vs. line boundaries\n after_text = self.options.get('start-after', None)\n if after_text:\n # skip content in rawtext before *and incl.* a matching text\n after_index = rawtext.find(after_text)\n if after_index < 0:\n raise self.severe('Problem with \"start-after\" option of \"%s\" '\n 'directive:\\nText not found.' % self.name)\n rawtext = rawtext[after_index + len(after_text):]\n before_text = self.options.get('end-before', None)\n if before_text:\n # skip content in rawtext after *and incl.* a matching text\n before_index = rawtext.find(before_text)\n if before_index < 0:\n raise self.severe('Problem with \"end-before\" option of \"%s\" '\n 'directive:\\nText not found.' 
% self.name)\n rawtext = rawtext[:before_index]\n\n # Handle alternate comment styles\n style = self.options.get('style', 'C-style')\n if style not in COMMENT_STYLES:\n raise self.severe('Cannot find comment style \"%s\", not in %s'\n % (style, COMMENT_STYLES.keys()))\n self.comment_options = COMMENT_STYLES[style]\n\n rawtext = self.filterText(rawtext)\n #if (path == \"../examples/neuropil_hydra.c\"):\n #raise self.severe('filterd text from %s:\\n%s' % (path, rawtext))\n\n include_lines = statemachine.string2lines(rawtext, tab_width,\n convert_whitespace=True)\n if 'literal' in self.options:\n # Convert tabs to spaces, if `tab_width` is positive.\n if tab_width >= 0:\n text = rawtext.expandtabs(tab_width)\n else:\n text = rawtext\n literal_block = nodes.literal_block(rawtext, source=path,\n classes=self.options.get('class', []))\n literal_block.line = 1\n self.add_name(literal_block)\n if 'number-lines' in self.options:\n try:\n startline = int(self.options['number-lines'] or 1)\n except ValueError:\n raise self.error(':number-lines: with non-integer '\n 'start value')\n endline = startline + len(include_lines)\n if text.endswith('\\n'):\n text = text[:-1]\n tokens = NumberLines([([], text)], startline, endline)\n for classes, value in tokens:\n if classes:\n literal_block += nodes.inline(value, value,\n classes=classes)\n else:\n literal_block += nodes.Text(value, value)\n else:\n literal_block += nodes.Text(text, text)\n return [literal_block]\n if 'code' in self.options:\n self.options['source'] = path\n codeblock = CodeBlock(self.name,\n [self.options.pop('code')], # arguments\n self.options,\n include_lines, # content\n self.lineno,\n self.content_offset,\n self.block_text,\n self.state,\n self.state_machine)\n return codeblock.run()\n\n self.state_machine.insert_input(include_lines, path)\n return []", "def test_replace_namespaced_template(self):\n pass", "def test_read_namespaced_template(self):\n pass", "def construct_include(loader: Loader, node: yaml.Node) -> Any:\n\n filename = os.path.abspath(\n os.path.join(loader._root, loader.construct_scalar(node))\n )\n extension = os.path.splitext(filename)[1].lstrip(\".\")\n\n with open(filename, \"r\") as f:\n if extension in (\"yaml\", \"yml\"):\n return yaml.load(f, Loader)\n elif extension in (\"json\",):\n return json.load(f)\n else:\n return \"\".join(f.readlines())", "def test_get_page_template_tag(self):\n context = Context({})\n pl1 = \"\"\"{% load pages_tags %}{% get_page get-page-slug as toto %}{{ toto }}\"\"\"\n template = get_template_from_string(pl1)\n self.assertEqual(template.render(context), u'None')\n page = self.new_page({'slug':'get-page-slug'})\n self.assertEqual(template.render(context), u'get-page-slug')", "def test_can_load_relative_include(self):\r\n path = os.path.join(TEST_FILES_PATH, \"test_rel_include.bb\")\r\n steps, vars = ExecuteScriptFile(path, {})\r\n self.assertEquals(vars['test'], \"Hello World\")", "def test_get_template_tag_on_page(self):\n load_tag = \"{%% load %s %%}\" % self.tag_name\n edit_tag = \"{%% %s %s %%}\" % (\n self.tag_name,\n self.model_name\n )\n template = Template(load_tag + edit_tag)\n queryset = self.model.objects.first()\n context = Context({\"contact\": queryset})\n needed_path = reverse_lazy(\n \"admin:%s_%s_change\" % (self.app_name, self.model_name),\n args=(queryset.id,)\n )\n self.assertEqual(needed_path, template.render(context))", "def test_register_template(self):\n pass", "def test_non_nested_template_source_generation(self):\n sources = [source for source in 
self.loader.get_template_sources('component.html')]\n\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/component.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/component.html')" ]
[ "0.72237074", "0.7058335", "0.6938502", "0.69019485", "0.6657269", "0.66082406", "0.6418754", "0.623286", "0.620674", "0.5977595", "0.5906052", "0.5865723", "0.58279705", "0.5799238", "0.5733636", "0.57208425", "0.57152575", "0.5704218", "0.5651656", "0.5632499", "0.56095517", "0.55745834", "0.556461", "0.5520287", "0.55140966", "0.55090207", "0.5476301", "0.5464895", "0.5462963", "0.5455186" ]
0.7729179
0
Testing {{...|indent}} with default indentation level
def test_with_default_indent(self): self.assertEqual(indent('foo'), ' foo')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_custom_indent(self):\n self.assertEqual(indent('foo', 3), ' foo')", "def test_adjust_indent():\n hr.Element.indent = 2\n\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def test_with_multiple_lines(self):\n self.assertEqual(indent('foo\\nbar'),\n ' foo\\n bar')", "def test_reset_limit_on_indent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = -1\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = 0\n indenter.indent()\n indenter.indentation = +1\n indenter.indent()\n indenter.indentation = +2\n indenter.indent()", "def doIndent(context, match):\n\treturn True\n\tv = context.getVariables().getParent ()\n\ti = v.get(\"requiredIndent\") or 0\n\tv.set(\"requiredIndent\", i + 1)\n\treturn True", "def test_indent():\n\n multiline_string = \"\"\"test\ntest1\ntest2\ntest3\"\"\"\n\n indented_multiline_string = \"\"\" test\n test1\n test2\n test3\"\"\"\n\n assert indented_multiline_string == _indent(multiline_string, 4)", "def test_multiple_indent():\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def block_indent(text, spaces=4):\n return '\\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])", "def check_indent_allowed(self) -> bool:\n return True", "def test_indent_rigidly(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n \tline 1\n \t line a\n \t line b\n \tline c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"5.0\"),\n after_sel=(\"2.0\", \"5.1\"),\n command_name=\"indent-rigidly\",\n )", "def indent(self, indent: str = default_indent):\n ori_bullet = self._bullet\n ori_indent = self._indent\n if not self._bullet:\n self._indent = self._indent + indent\n self._bullet = \"\"\n self._update()\n try:\n yield self\n finally:\n self._bullet = ori_bullet\n self._indent = ori_indent\n self._update()", "def check_indent_allowed(self) -> bool:\n return False", "def check_indent_allowed(self) -> bool:\n return False", "def test_indent_contents():\n html = hr.Element(\"some content\")\n file_contents = render_result(html, ind=\"\")\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[1].startswith(\" \" * hr.Element.indent)", "def test_element_indent1():\n elem = hr.Element(\"this is some text\")\n\n # This uses the render_results utility above\n file_contents = render_result(elem).strip()\n\n # making sure the content got in there.\n assert \"this is some text\" in file_contents\n\n # break into lines to check indentation\n lines = file_contents.split(\"\\n\")\n # making sure the opening and closing tags are right.\n assert lines[0] == \"<html>\"\n # this line should be indented by the amount 
specified\n # by the class attribute: \"indent\"\n assert lines[1].startswith((\" \" * hr.Element.indent) + \"thi\")\n assert lines[2] == \"</html>\"\n assert file_contents.endswith(\"</html>\")", "def test_reset_limit_on_dedent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.dedent)\n indenter.indentation = -1\n self.assertRaises(ValueError, indenter.dedent)\n indenter.indentation = 0\n self.assertRaises(ValueError, indenter.dedent)\n indenter.indentation = +1\n indenter.dedent()\n indenter.indentation = +2\n indenter.dedent()", "def indentation(self, indent: str) -> None:\n self._indent = indent\n self._update()", "def test_incorrect_indent(self, x=1, y=2): # noqa: D207, D213, D407", "def test_indent():\n html = hr.Html(\"some content\")\n file_contents = render_result(html, ind=\" \").rstrip() # remove the end newline\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[0].startswith(\" <\")\n print(repr(lines[-1]))\n assert lines[-1].startswith(\" <\")", "def test_newline_and_indent(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n \n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.6\", \"2.6\"),\n after_sel=(\"3.4\", \"3.4\"),\n command_name=\"newline-and-indent\",\n )", "def __enter__():\n IndentedLogger._indent_level += 1\n return IndentedLogger", "def _indent_spaces(self):\n if prettyprint:\n return self.indentspace * self._indent_level\n else:\n return ''", "def doDedent(context, match):\n\treturn True\n\tv = context.getVariables().getParent ()\n\ti = v.get(\"requiredIndent\") or 0\n\tv.set(\"requiredIndent\", i - 1)\n\treturn True", "def test_reformat_paragraph_simple_hanging_indent_3(self):\n before_b = \"\"\"\\\n Honor this line that \n has a hanging indentation, \n please. Hanging\n indentation is valuable\n for lists of all kinds. But \n it is tricky to get right.\n\n Next Paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Honor this line that has a hanging\n indentation, please. Hanging\n indentation is valuable for lists of\n all kinds. But it is tricky to get\n right.\n\n Next Paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_reformat_paragraph_simple_hanging_indent(self):\n before_b = \"\"\"\\\n Honor this line that has a hanging indentation, please. Hanging\n indentation is valuable for lists of all kinds. But it is tricky to get right.\n\n Next paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Honor this line that has a hanging\n indentation, please. Hanging\n indentation is valuable for lists of\n all kinds. 
But it is tricky to get\n right.\n\n Next paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_delete_indentation(self):\n before_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.8\", \"2.8\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"delete-indentation\",\n )", "def Indent(indents):\n return ' ' * (2 * indents)", "def indent(text, *args):\n _, module_name, line_no, *_ = inspect.stack()[1]\n module_info = _get_module_info(module_name)\n module_source, template_source = module_info.code, module_info.source\n\n source_map = ModuleInfo.get_module_source_metadata(\n module_source,\n full_line_map=True\n )\n\n line_map = source_map['full_line_map']\n template_ln_no = line_map[line_no - 1]\n template_line = template_source.split('\\n')[template_ln_no - 1]\n\n indent = re.match('[ \\t]*', template_line).group(0)\n return indent.join(x for x in text.splitlines(keepends=True))", "def indent(text, prefix, predicate=...): # -> str:\n ...", "def test_indent_relative(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"5.0\", \"5.0\"),\n after_sel=(\"5.8\", \"5.8\"),\n command_name=\"indent-relative\",\n )" ]
[ "0.778307", "0.65845025", "0.64145744", "0.640567", "0.6320119", "0.6283757", "0.6254726", "0.62319493", "0.61933684", "0.6110476", "0.6092362", "0.60786456", "0.60786456", "0.60785407", "0.6023838", "0.59295094", "0.5891613", "0.58672994", "0.58418995", "0.5767337", "0.5701758", "0.56634986", "0.5643155", "0.5640158", "0.5625656", "0.56147844", "0.561323", "0.5592296", "0.5591763", "0.5571662" ]
0.76486266
1
Testing {{...|indent}} with custom indentation level
def test_with_custom_indent(self): self.assertEqual(indent('foo', 3), ' foo')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_default_indent(self):\n self.assertEqual(indent('foo'), ' foo')", "def test_adjust_indent():\n hr.Element.indent = 2\n\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def test_with_multiple_lines(self):\n self.assertEqual(indent('foo\\nbar'),\n ' foo\\n bar')", "def test_multiple_indent():\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def test_indent():\n\n multiline_string = \"\"\"test\ntest1\ntest2\ntest3\"\"\"\n\n indented_multiline_string = \"\"\" test\n test1\n test2\n test3\"\"\"\n\n assert indented_multiline_string == _indent(multiline_string, 4)", "def test_reset_limit_on_indent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = -1\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = 0\n indenter.indent()\n indenter.indentation = +1\n indenter.indent()\n indenter.indentation = +2\n indenter.indent()", "def doIndent(context, match):\n\treturn True\n\tv = context.getVariables().getParent ()\n\ti = v.get(\"requiredIndent\") or 0\n\tv.set(\"requiredIndent\", i + 1)\n\treturn True", "def test_indent_contents():\n html = hr.Element(\"some content\")\n file_contents = render_result(html, ind=\"\")\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[1].startswith(\" \" * hr.Element.indent)", "def test_indent_rigidly(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n \tline 1\n \t line a\n \t line b\n \tline c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"5.0\"),\n after_sel=(\"2.0\", \"5.1\"),\n command_name=\"indent-rigidly\",\n )", "def block_indent(text, spaces=4):\n return '\\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])", "def test_element_indent1():\n elem = hr.Element(\"this is some text\")\n\n # This uses the render_results utility above\n file_contents = render_result(elem).strip()\n\n # making sure the content got in there.\n assert \"this is some text\" in file_contents\n\n # break into lines to check indentation\n lines = file_contents.split(\"\\n\")\n # making sure the opening and closing tags are right.\n assert lines[0] == \"<html>\"\n # this line should be indented by the amount specified\n # by the class attribute: \"indent\"\n assert lines[1].startswith((\" \" * hr.Element.indent) + \"thi\")\n assert lines[2] == \"</html>\"\n assert file_contents.endswith(\"</html>\")", "def check_indent_allowed(self) -> bool:\n return True", "def indent(self, indent: str = default_indent):\n ori_bullet = self._bullet\n ori_indent = self._indent\n if not self._bullet:\n self._indent = self._indent + indent\n self._bullet = \"\"\n self._update()\n try:\n yield self\n 
finally:\n self._bullet = ori_bullet\n self._indent = ori_indent\n self._update()", "def check_indent_allowed(self) -> bool:\n return False", "def check_indent_allowed(self) -> bool:\n return False", "def test_indent():\n html = hr.Html(\"some content\")\n file_contents = render_result(html, ind=\" \").rstrip() # remove the end newline\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[0].startswith(\" <\")\n print(repr(lines[-1]))\n assert lines[-1].startswith(\" <\")", "def test_newline_and_indent(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n \n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.6\", \"2.6\"),\n after_sel=(\"3.4\", \"3.4\"),\n command_name=\"newline-and-indent\",\n )", "def test_incorrect_indent(self, x=1, y=2): # noqa: D207, D213, D407", "def test_reset_limit_on_dedent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.dedent)\n indenter.indentation = -1\n self.assertRaises(ValueError, indenter.dedent)\n indenter.indentation = 0\n self.assertRaises(ValueError, indenter.dedent)\n indenter.indentation = +1\n indenter.dedent()\n indenter.indentation = +2\n indenter.dedent()", "def indent(text, prefix, predicate=...): # -> str:\n ...", "def indentation(self, indent: str) -> None:\n self._indent = indent\n self._update()", "def indent(text, *args):\n _, module_name, line_no, *_ = inspect.stack()[1]\n module_info = _get_module_info(module_name)\n module_source, template_source = module_info.code, module_info.source\n\n source_map = ModuleInfo.get_module_source_metadata(\n module_source,\n full_line_map=True\n )\n\n line_map = source_map['full_line_map']\n template_ln_no = line_map[line_no - 1]\n template_line = template_source.split('\\n')[template_ln_no - 1]\n\n indent = re.match('[ \\t]*', template_line).group(0)\n return indent.join(x for x in text.splitlines(keepends=True))", "def test_indent_relative(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"5.0\", \"5.0\"),\n after_sel=(\"5.8\", \"5.8\"),\n command_name=\"indent-relative\",\n )", "def indent(txt, indent_level):\n indent = \" \" * indent_level\n return \"\\n\".join(indent + x for x in txt.splitlines())", "def indent(self):\n self.indent_level += self.INDENT_STEP", "def indent(self):\n self.indent_level += self.INDENT_STEP", "def Indent(indents):\n return ' ' * (2 * indents)", "def __enter__():\n IndentedLogger._indent_level += 1\n return IndentedLogger", "def _indent_spaces(self):\n if prettyprint:\n return self.indentspace * self._indent_level\n else:\n return ''", "def test_delete_indentation(self):\n before_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.8\", \"2.8\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"delete-indentation\",\n )" ]
[ "0.7285234", "0.68716925", "0.6601827", "0.6521513", "0.6454053", "0.6436792", "0.637126", "0.634604", "0.62667", "0.625941", "0.6186124", "0.61533314", "0.6131626", "0.6043399", "0.6043399", "0.602634", "0.59728074", "0.5963381", "0.59593403", "0.5902826", "0.58931136", "0.58611804", "0.58489245", "0.57940996", "0.57560086", "0.57560086", "0.5725257", "0.5689815", "0.56804365", "0.56716067" ]
0.7920645
0
Testing {{...|indent}} with multiple lines
def test_with_multiple_lines(self): self.assertEqual(indent('foo\nbar'), ' foo\n bar')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_custom_indent(self):\n self.assertEqual(indent('foo', 3), ' foo')", "def test_multiple_indent():\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def test_indent():\n\n multiline_string = \"\"\"test\ntest1\ntest2\ntest3\"\"\"\n\n indented_multiline_string = \"\"\" test\n test1\n test2\n test3\"\"\"\n\n assert indented_multiline_string == _indent(multiline_string, 4)", "def block_indent(text, spaces=4):\n return '\\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])", "def test_indent_rigidly(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n \tline 1\n \t line a\n \t line b\n \tline c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"5.0\"),\n after_sel=(\"2.0\", \"5.1\"),\n command_name=\"indent-rigidly\",\n )", "def test_adjust_indent():\n hr.Element.indent = 2\n\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def test_with_default_indent(self):\n self.assertEqual(indent('foo'), ' foo')", "def test_newline_and_indent(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n \n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.6\", \"2.6\"),\n after_sel=(\"3.4\", \"3.4\"),\n command_name=\"newline-and-indent\",\n )", "def test_indent_contents():\n html = hr.Element(\"some content\")\n file_contents = render_result(html, ind=\"\")\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[1].startswith(\" \" * hr.Element.indent)", "def test_indent():\n html = hr.Html(\"some content\")\n file_contents = render_result(html, ind=\" \").rstrip() # remove the end newline\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n assert lines[0].startswith(\" <\")\n print(repr(lines[-1]))\n assert lines[-1].startswith(\" <\")", "def __indent_text_block(text):\n lines = text.splitlines()\n if len(lines) > 1:\n out = lines[0] + \"\\r\\n\"\n for i in range(1, len(lines)-1):\n out = out + \" \" + lines[i] + \"\\r\\n\"\n out = out + \" \" + lines[-1]\n return out\n return text", "def test_incorrect_indent(self, x=1, y=2): # noqa: D207, D213, D407", "def test_code(self):\n self.assertEquals(\"\\n\\tline1\\n\\tline2\",\n trans(\"{{{\\nline1\\nline2\\n}}}\"))", "def section_overindented(): # noqa: D416", "def test_reformat_paragraph_simple_hanging_indent_3(self):\n before_b = \"\"\"\\\n Honor this line that \n has a hanging indentation, \n please. Hanging\n indentation is valuable\n for lists of all kinds. But \n it is tricky to get right.\n\n Next Paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Honor this line that has a hanging\n indentation, please. 
Hanging\n indentation is valuable for lists of\n all kinds. But it is tricky to get\n right.\n\n Next Paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def test_reformat_paragraph_simple_hanging_indent(self):\n before_b = \"\"\"\\\n Honor this line that has a hanging indentation, please. Hanging\n indentation is valuable for lists of all kinds. But it is tricky to get right.\n\n Next paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Honor this line that has a hanging\n indentation, please. Hanging\n indentation is valuable for lists of\n all kinds. But it is tricky to get\n right.\n\n Next paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def _addIndent(self, block, count=1):\n return re.compile(r\"^((?!$))\", re.M).sub(\" \" * count, block)", "def test_element_indent1():\n elem = hr.Element(\"this is some text\")\n\n # This uses the render_results utility above\n file_contents = render_result(elem).strip()\n\n # making sure the content got in there.\n assert \"this is some text\" in file_contents\n\n # break into lines to check indentation\n lines = file_contents.split(\"\\n\")\n # making sure the opening and closing tags are right.\n assert lines[0] == \"<html>\"\n # this line should be indented by the amount specified\n # by the class attribute: \"indent\"\n assert lines[1].startswith((\" \" * hr.Element.indent) + \"thi\")\n assert lines[2] == \"</html>\"\n assert file_contents.endswith(\"</html>\")", "def test_reformat_paragraph_simple_hanging_indent_2(self):\n before_b = \"\"\"\\\n Honor this line that has\n a hanging indentation, please. Hanging\n indentation is valuable for lists of all kinds. But it is tricky to get right.\n\n Next paragraph.\n \"\"\"\n after_b = \"\"\"\\\n Honor this line that has a hanging\n indentation, please. Hanging\n indentation is valuable for lists of\n all kinds. 
But it is tricky to get\n right.\n\n Next paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"2.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def testIndentedList(self):\n list1 = (\"<para>This is a paragraph.</para><ulist>\"+\n \"<li><para>This is a list item.</para></li>\"+\n \"</ulist><para>This is a paragraph</para>\")\n list2 = '<ulist><li><para>This is a list item.</para></li></ulist>'\n \n self.checkParse('This is a paragraph.\\n - This is a list item.\\n'+\n 'This is a paragraph', list1)\n self.checkParse('This is a paragraph.\\n\\n - This is a list item.'+\n '\\n\\nThis is a paragraph', list1)\n self.checkParse(\"\"\"\n This is a paragraph.\n \n - This is a list item.\n \n This is a paragraph\"\"\", list1)\n self.checkParse(\"\"\"\n This is a paragraph.\n \n - This is a list item.\n This is a paragraph\"\"\", list1)\n self.checkParse(\"\"\"\n - This is a list item.\"\"\", list2)\n self.checkParse(\"\"\"- This is a list item.\"\"\", list2)\n self.checkParse(\"\"\"\\n- This is a list item.\"\"\", list2)", "def doIndent(context, match):\n\treturn True\n\tv = context.getVariables().getParent ()\n\ti = v.get(\"requiredIndent\") or 0\n\tv.set(\"requiredIndent\", i + 1)\n\treturn True", "def check_indent_allowed(self) -> bool:\n return True", "def indent(self, indent: str = default_indent):\n ori_bullet = self._bullet\n ori_indent = self._indent\n if not self._bullet:\n self._indent = self._indent + indent\n self._bullet = \"\"\n self._update()\n try:\n yield self\n finally:\n self._bullet = ori_bullet\n self._indent = ori_indent\n self._update()", "def test_poly_list_indentations():\n lines = inspect.getsource(poly_list)\n spaces = re.findall('\\n +.', lines)\n for space in spaces:\n assert len(space) % 4 == 2, \"Your script contains misplaced indentations\"\n assert len(re.sub(r'[^ ]', '', space)) % 4 == 0, \"Your code indentation does not follow PEP8 guidelines\"", "def test_indent_relative(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"5.0\", \"5.0\"),\n after_sel=(\"5.8\", \"5.8\"),\n command_name=\"indent-relative\",\n )", "def indent(text, *args):\n _, module_name, line_no, *_ = inspect.stack()[1]\n module_info = _get_module_info(module_name)\n module_source, template_source = module_info.code, module_info.source\n\n source_map = ModuleInfo.get_module_source_metadata(\n module_source,\n full_line_map=True\n )\n\n line_map = source_map['full_line_map']\n template_ln_no = line_map[line_no - 1]\n template_line = template_source.split('\\n')[template_ln_no - 1]\n\n indent = re.match('[ \\t]*', template_line).group(0)\n return indent.join(x for x in text.splitlines(keepends=True))", "def test_delete_indentation(self):\n before_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.8\", \"2.8\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"delete-indentation\",\n )", "def test_reset_limit_on_indent(self):\n indenter = indent.Indenter()\n indenter.indentation = -2\n self.assertRaises(ValueError, indenter.indent)\n indenter.indentation = -1\n self.assertRaises(ValueError, 
indenter.indent)\n indenter.indentation = 0\n indenter.indent()\n indenter.indentation = +1\n indenter.indent()\n indenter.indentation = +2\n indenter.indent()", "def check_indent_allowed(self) -> bool:\n return False", "def check_indent_allowed(self) -> bool:\n return False" ]
[ "0.735322", "0.7073774", "0.69509804", "0.6840931", "0.68078333", "0.68071306", "0.67973256", "0.67146814", "0.66842926", "0.64460665", "0.63834816", "0.6257567", "0.6255706", "0.6243714", "0.6222274", "0.61781883", "0.6087206", "0.6076181", "0.60641384", "0.605803", "0.60136795", "0.59893507", "0.5961345", "0.595385", "0.5944311", "0.5935975", "0.5927913", "0.59261894", "0.58926046", "0.58926046" ]
0.7795303
0
Testing {% querystring "update" %} basic usage
def test_update_basic_usage(self): self.assertEqual( self._render_tag(tag='{% querystring "update" "foo=bar" %}', query_str='foo=bar'), '?foo=bar')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def update(request):\n return 0", "def test_sqlite_update(self):\r\n self._get_good_request()\r\n\r\n # now we need to do another request with updated tag string\r\n self._get_good_request(new_tags=u\"google books icons\")\r\n\r\n search_res = self.testapp.get('/admin/results?search=icon')\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n\r\n self.assertTrue(\r\n 'icon' in search_res.body,\r\n \"We should find the new tag icon on the page: \" + search_res.body)", "def test_update_returns_entry_random(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=\"WAT\",\n body=\"Bob Dole\",\n creation_date=\"1/2/3\")\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == \"WAT\"\n assert 
query_reslts.body == \"Bob Dole\"", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def test_updated_nonexistent(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(response.status_code, 200)", "def test_ingredients_update(self):\n app = self.create_app()\n\n c = app.test_client()\n\n # tests if authorization is required\n rv = c.get('/ingredients/1/update')\n self.assertRedirects(rv, \"/auth/login\")\n\n register(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n login(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n c.get('/ingredients/ing_unittest1_liquid/update')\n self.assert_template_used(\"ingredients/update.html\")", "def update():\n return 'update api in put'", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def test_update_route_has_populated_form(testapp, fill_the_db):\n response = testapp.get('/journal/1/edit-entry', status=200)\n title = response.html.form.input[\"value\"]\n body = response.html.form.textarea.contents[0]\n assert title == ENTRIES[0][\"title\"]\n assert body == ENTRIES[0][\"body\"]", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def test_user_update_request(self):\n pass", "def test_post_partial_update_logged_in(self):\n url = 
reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass" ]
[ "0.8160826", "0.81003916", "0.78848255", "0.781702", "0.7786558", "0.77261317", "0.7715116", "0.7595097", "0.7405113", "0.63803375", "0.6078759", "0.59671193", "0.5962284", "0.59361494", "0.59328645", "0.5919303", "0.5906413", "0.59014446", "0.58933955", "0.5768803", "0.57684606", "0.57684606", "0.57264465", "0.57242733", "0.57201463", "0.57087797", "0.5685335", "0.5672637", "0.5672637", "0.5672637" ]
0.84873694
0
Testing {% querystring "update" %} with an existing query that gets overridden
def test_update_with_existing_query_override(self): rendered_result = self._render_tag( tag='{% querystring "update" "foo=bar" %}', query_str='foo=foo&bar=baz') self.assertTrue(rendered_result.startswith('?')) self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=bar&bar=baz'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def test_updated_nonexistent(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(response.status_code, 200)", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def 
test_update_returns_entry_random(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=\"WAT\",\n body=\"Bob Dole\",\n creation_date=\"1/2/3\")\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == \"WAT\"\n assert query_reslts.body == \"Bob Dole\"", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def test_sqlite_update(self):\r\n self._get_good_request()\r\n\r\n # now we need to do another request with updated tag string\r\n self._get_good_request(new_tags=u\"google books icons\")\r\n\r\n search_res = self.testapp.get('/admin/results?search=icon')\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n\r\n self.assertTrue(\r\n 'icon' in search_res.body,\r\n \"We should find the new tag icon on the page: \" + search_res.body)", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def update_query(self, **updates):\r\n self._url_updates.update(updates)", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n 
original_params.update(params)\n return original_params.urlencode()", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_queryset_ttl_success_case(self):", "def updateView(request, query, exquery, wild_card_str):\n query = copy.deepcopy(query)\n exquery = copy.deepcopy(exquery)\n\n if 'modificationtime__castdate__range' in query:\n query['creationdate__castdate__range'] = query['modificationtime__castdate__range']\n del query['modificationtime__castdate__range']\n if 'workinggroup' in query and 'preset' in request.session['requestParams'] and \\\n request.session['requestParams']['preset'] == 'MC' and ',' in query['workinggroup']:\n # excludeWGList = list(str(wg[1:]) for wg in request.session['requestParams']['workinggroup'].split(','))\n # exquery['workinggroup__in'] = excludeWGList\n try:\n del query['workinggroup']\n except:\n pass\n if 'status' in request.session['requestParams'] and request.session['requestParams']['status'] == '':\n try:\n del query['status']\n except:\n pass\n if 'site' in request.session['requestParams'] and request.session['requestParams']['site'] == 'hpc':\n try:\n del query['site']\n except:\n pass\n exquery['site__isnull'] = True\n if 'currentpriority__gte' in query and 'currentpriority__lte' in query:\n query['priority__gte'] = query['currentpriority__gte']\n query['priority__lte'] = query['currentpriority__lte']\n del query['currentpriority__gte']\n del query['currentpriority__lte']\n\n if 'runnumber' in request.session['requestParams'] and request.session['requestParams']['runnumber']:\n try:\n query['runnumber'] = int(request.session['requestParams']['runnumber'])\n except:\n _logger.exception('Provided runnumber is not valid. It should be int')\n\n jedi_tasks_fields = [field.name for field in JediTasks._meta.get_fields() if field.get_internal_type() == 'CharField']\n running_prod_fields = (set([\n field.name for field in RunningProdTasksModel._meta.get_fields() if field.get_internal_type() == 'CharField'\n ])).difference(set(jedi_tasks_fields))\n\n for f in running_prod_fields:\n if f in request.session['requestParams'] and request.session['requestParams'][f] and f not in query and f not in wild_card_str:\n if f == 'hashtags':\n wild_card_str += ' and ('\n wildCards = request.session['requestParams'][f].split(',')\n currentCardCount = 1\n countCards = len(wildCards)\n for card in wildCards:\n if '*' not in card:\n card = '*' + card + '*'\n elif card.startswith('*'):\n card = card + '*'\n elif card.endswith('*'):\n card = '*' + card\n wild_card_str += preprocess_wild_card_string(card, 'hashtags')\n if currentCardCount < countCards:\n wild_card_str += ' and '\n currentCardCount += 1\n wild_card_str += ')'\n elif f == 'scope' and (\n '!' 
in request.session['requestParams'][f] or '*' in request.session['requestParams'][f]):\n wild_card_str += ' and ({})'.format(preprocess_wild_card_string(request.session['requestParams'][f], f))\n else:\n query[f] = request.session['requestParams'][f]\n\n return query, exquery, wild_card_str", "def test_update_case(self):\n pass", "def test_updated_invalid(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'updated': 1, 'updated_date': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(1, json.loads(response.content)['total'])", "def test_user_update_request(self):\n pass", "def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})" ]
[ "0.82285905", "0.8130441", "0.80727655", "0.79774666", "0.78701615", "0.7824038", "0.7694648", "0.74788016", "0.72497344", "0.6282942", "0.6119359", "0.60488284", "0.6003275", "0.60010093", "0.5985671", "0.59831214", "0.5979202", "0.59778893", "0.59083915", "0.5906219", "0.58789855", "0.58285", "0.58285", "0.57628924", "0.5733223", "0.5719945", "0.57169074", "0.57122105", "0.57029295", "0.56817883" ]
0.88072
0
Testing {% querystring "update" %} with two args that get overridden
def test_update_with_existing_query_with_two_args_override(self): rendered_result = self._render_tag( tag='{% querystring "update" "foo=bar" "qux=baz" %}', query_str='foo=foo&bar=bar&baz=baz&qux=qux') self.assertTrue(rendered_result.startswith('?')) self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def update(*args):", "def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in 
query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def update_query(self, **updates):\r\n self._url_updates.update(updates)", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def test_updated_nonexistent(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(response.status_code, 200)", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_user_update_request(self):\n pass", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def test_update_case(self):\n pass", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def update(request):\n return 0", "def test_indirect_parameters_update(renku_cli, project):\n with chdir(project.path):\n Path(\".renku/tmp\").mkdir(exist_ok=True)\n\n 
Path(\"script.sh\").write_text(\n \"\"\"\n echo param 1: \"forty-two\" >> .renku/tmp/parameters.yml\n echo param-2: 42.42 >> .renku/tmp/parameters.yml\n echo param3: 42 >> .renku/tmp/parameters.yml\n \"\"\"\n )\n\n project.repository.add(all=True)\n project.repository.commit(\"test setup\")\n\n renku_cli(\"run\", \"sh\", \"script.sh\", stdout=\"result\")\n\n with chdir(project.path):\n Path(\".renku/tmp\").mkdir(exist_ok=True)\n\n Path(\"script.sh\").write_text(\n \"\"\"\n echo param 1: \"forty-two-updated\" >> .renku/tmp/parameters.yml\n echo param-2: 42.42 >> .renku/tmp/parameters.yml\n \"\"\"\n )\n\n project.repository.add(all=True)\n project.repository.commit(\"test setup\")\n\n exit_code, activity = renku_cli(\"update\", \"--all\")\n\n assert 0 == exit_code\n assert {\"forty-two-updated\", \"42.42\", \"42\"} == {a.default_value for a in activity.association.plan.parameters}", "def test_partial_update(self):\n\n action = ActionFactory.create(id=22)\n data = {\n 'name': 'Ação para Melhorar',\n 'institution': 'Vamos Ajudar',\n }\n self.assertNotEqual(action.name, data['name'])\n self.assertNotEqual(action.institution, data['institution'])\n\n response = self.client.patch(reverse('action-detail', args=[23]), data=data)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n response = self.client.patch(reverse('action-detail', args=[22]), data=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['name'], data['name'])\n self.assertEqual(response.data['institution'], data['institution'])", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''" ]
[ "0.8393917", "0.81698006", "0.8157803", "0.7903674", "0.7751201", "0.772445", "0.76976013", "0.7611009", "0.7605545", "0.6213056", "0.6131876", "0.6074763", "0.6017298", "0.59298253", "0.5928301", "0.58698416", "0.58698416", "0.58597875", "0.5797873", "0.57940143", "0.578386", "0.5774963", "0.57151526", "0.569676", "0.5691871", "0.568476", "0.5682777", "0.5682281", "0.5681766", "0.5673935" ]
0.8527788
0
Testing {% querystring "update" %} with no value
def test_update_with_no_value(self): rendered_result = self._render_tag( tag='{% querystring "update" "foo" %}', query_str='') self.assertTrue(rendered_result.startswith('?')) self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def test_updated_nonexistent(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(response.status_code, 200)", "def update(request):\n return 0", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n 
tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_updated_invalid(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'updated': 1, 'updated_date': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(1, json.loads(response.content)['total'])", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_partial_update_should_not_be_allowed(self):\n response = self.client.patch(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_returns_entry_random(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=\"WAT\",\n body=\"Bob Dole\",\n creation_date=\"1/2/3\")\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == \"WAT\"\n assert query_reslts.body == \"Bob Dole\"", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' 
+ params.urlencode()) if params else ''", "def update_settings(self, param):\n if param.name() == '':\n pass", "def test_post_partial_update_logged_in(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n self.client.force_authenticate(user=self.user)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)" ]
[ "0.83778876", "0.8344275", "0.79929906", "0.7838386", "0.77645916", "0.75217503", "0.7430781", "0.7206279", "0.6957165", "0.63444877", "0.61944747", "0.6171176", "0.6119739", "0.59485555", "0.5840339", "0.58194894", "0.5763195", "0.5723799", "0.5700123", "0.5652544", "0.56483614", "0.563298", "0.56296676", "0.56040734", "0.5594425", "0.559177", "0.55860454", "0.5579432", "0.55660945", "0.5532161" ]
0.8507903
0
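Every document in this stretch asserts on QueryDict objects rather than on the raw rendered string, and for good reason: QueryDict equality ignores key order (only the ordering of values within a key matters), so the assertions stay stable however the tag chooses to serialize the parameters. A standalone demonstration of that behavior, with the minimal settings boilerplate QueryDict needs outside a real Django project; nothing in this snippet comes from the dump itself:

from django.conf import settings

# QueryDict reads DEFAULT_CHARSET from settings, so configure defaults
# before instantiating one outside a real project.
if not settings.configured:
    settings.configure()

from django.http import QueryDict

assert QueryDict('a=1&b=2') == QueryDict('b=2&a=1')  # key order is ignored
assert QueryDict('a=1&a=2') != QueryDict('a=2&a=1')  # value order per key is kept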
Testing {% querystring "update" %} with multiple values
def test_update_with_multiple_values(self):
    rendered_result = self._render_tag(
        tag='{% querystring "update" "foo=bar=baz" %}',
        query_str='foo=foo')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('foo=bar=baz'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def test_append_with_multiple_values_and_same_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1&a=2&a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def test_append_with_multiple_values_and_same_key_seperated(self):\n rendered_result = 
self._render_tag(\n tag='{% querystring \"append\" \"a=1\" \"a=2\" \"a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def update_query(self, **updates):\r\n self._url_updates.update(updates)", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_update_many(self):\n sample_input = \"\"\"\nfoo=100\nbar=200, baz=300\n\"\"\"\n self.assertNotEquals(self.param_dict.get(\"foo\"), 100)\n self.assertNotEquals(self.param_dict.get(\"bar\"), 200)\n self.assertNotEquals(self.param_dict.get(\"baz\"), 300)\n result = self.param_dict.update_many(sample_input)\n log.debug(\"result: %s\", result)\n self.assertEquals(result[\"foo\"], True)\n self.assertEquals(result[\"bar\"], True)\n self.assertEquals(result[\"baz\"], True)\n self.assertEquals(self.param_dict.get(\"foo\"), 100)\n self.assertEquals(self.param_dict.get(\"bar\"), 200)\n self.assertEquals(self.param_dict.get(\"baz\"), 300)", "def test_update(self):\n self.assertEqual(['UPDATE', 'test', 'set a=1'],\n grammar._UPDATE_EXPR.parseString(\"UPDATE test set a=1;\").asList())", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n 
self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''", "def test_sqlite_update(self):\r\n self._get_good_request()\r\n\r\n # now we need to do another request with updated tag string\r\n self._get_good_request(new_tags=u\"google books icons\")\r\n\r\n search_res = self.testapp.get('/admin/results?search=icon')\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n\r\n self.assertTrue(\r\n 'icon' in search_res.body,\r\n \"We should find the new tag icon on the page: \" + search_res.body)", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))", "def update(*args):" ]
[ "0.83192694", "0.8115465", "0.7924868", "0.7861795", "0.7820674", "0.75683415", "0.74704516", "0.74522585", "0.7427821", "0.6365287", "0.62719387", "0.6235126", "0.6219152", "0.6127101", "0.6050058", "0.6045493", "0.6045493", "0.6033712", "0.59621257", "0.5841878", "0.5827072", "0.57766354", "0.57401115", "0.5716838", "0.5715516", "0.5675171", "0.5637066", "0.56117797", "0.5570395", "0.55675757" ]
0.8562762
0
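The "remove" tests that keep resurfacing in the negatives lists around here pin down a third verb of the same tag. Read together they imply two rules: a bare key (and, per test_remove_with_no_value, a trailing 'key=') drops every value stored under that key, while an explicit 'key=value' pair drops only exact matches. A dependency-free sketch of those inferred semantics; the function name and the stdlib approach are mine, not the tag's actual implementation:

from urllib.parse import parse_qsl, urlencode

def querystring_remove(query_str, *fragments):
    # Illustrative only: behavior inferred from the 'remove' negatives above.
    params = parse_qsl(query_str, keep_blank_values=True)
    for fragment in fragments:
        key, sep, value = fragment.partition('=')
        if not sep or (key and not value):
            # 'foo' and 'foo=' both drop every value stored under 'foo'.
            params = [(k, v) for (k, v) in params if k != key]
        else:
            # 'a=4' (or '=foo') drops only the exact key/value pair.
            params = [(k, v) for (k, v) in params if (k, v) != (key, value)]
    return '?' + urlencode(params)

assert querystring_remove('foo=foo&bar=bar', 'foo') == '?bar=bar'
assert querystring_remove('a=1&a=2&a=3&a=4', 'a=4') == '?a=1&a=2&a=3'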
Testing {% querystring "update" %} with an empty value
def test_update_with_empty_value(self):
    rendered_result = self._render_tag(
        tag='{% querystring "update" "foo=" %}',
        query_str='')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_updated_nonexistent(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(response.status_code, 200)", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" 
%}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def update(request):\n return 0", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def update_settings(self, param):\n if param.name() == '':\n pass", "def test_updated_invalid(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'updated': 1, 'updated_date': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(1, json.loads(response.content)['total'])", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_partial_update_should_not_be_allowed(self):\n response = self.client.patch(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def sentinel(request):\n return request.param", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def param_replace(context, **kwargs):\n d = context['request'].GET.copy()\n for k,v in kwargs.items():\n d[k] = v\n for 
k in [k for k,v in d.items() if not v]:\n del d[k]\n return d.urlencode()", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]" ]
[ "0.85938555", "0.8381606", "0.7849045", "0.760292", "0.7579912", "0.73541963", "0.7286511", "0.7055417", "0.67807907", "0.6305068", "0.62017447", "0.61609757", "0.6049829", "0.587832", "0.57909924", "0.57764435", "0.5737658", "0.57255924", "0.5694696", "0.5607382", "0.55810195", "0.5575199", "0.557047", "0.5529506", "0.5529506", "0.5497353", "0.5460258", "0.5440908", "0.542273", "0.5397261" ]
0.86261064
0
Testing {% querystring "update" %} with no key
def test_update_with_no_key(self):
    rendered_result = self._render_tag(
        tag='{% querystring "update" "=foo" %}',
        query_str='')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_updated_nonexistent(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(response.status_code, 200)", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n 
query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def update(request):\n return 0", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def test_update_returns_entry_random(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=\"WAT\",\n body=\"Bob Dole\",\n creation_date=\"1/2/3\")\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == \"WAT\"\n assert query_reslts.body == \"Bob Dole\"", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in 
original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_updated_invalid(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'updated': 1, 'updated_date': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(1, json.loads(response.content)['total'])", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''", "def test_map_update_none_deletes_key(self):\r\n # partition = uuid4()\r\n # cluster = 1\r\n # TestQueryUpdateModel.objects.create(\r\n # partition=partition, cluster=cluster,\r\n # text_map={\"foo\": '1', \"bar\": '2'})\r\n # TestQueryUpdateModel.objects(\r\n # partition=partition, cluster=cluster).update(\r\n # text_map__update={\"bar\": None})\r\n # obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)\r\n # self.assertEqual(obj.text_map, {\"foo\": '1'})\r" ]
[ "0.83767027", "0.8260205", "0.8205793", "0.8035107", "0.7845689", "0.7674357", "0.7564186", "0.7472058", "0.73783904", "0.6436952", "0.63122505", "0.6225137", "0.61859393", "0.612663", "0.6009522", "0.5991379", "0.5983234", "0.5963799", "0.5881154", "0.5877678", "0.58691776", "0.5811503", "0.5811503", "0.57892126", "0.5786322", "0.5765823", "0.5763515", "0.5700861", "0.5697283", "0.56873304" ]
0.8736628
0
Testing {% querystring "update" %} by updating multiple values of a key
def test_with_updating_multiple_values_of_a_key(self):
    rendered_result = self._render_tag(
        tag='{% querystring "update" "a=1&a=2" %}',
        query_str='foo=foo')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('foo=foo&a=1&a=2'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_basic_usage(self):\n self.assertEqual(\n self._render_tag(tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=bar'),\n '?foo=bar')", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_many(self):\n sample_input = \"\"\"\nfoo=100\nbar=200, baz=300\n\"\"\"\n self.assertNotEquals(self.param_dict.get(\"foo\"), 100)\n self.assertNotEquals(self.param_dict.get(\"bar\"), 200)\n self.assertNotEquals(self.param_dict.get(\"baz\"), 300)\n result = self.param_dict.update_many(sample_input)\n log.debug(\"result: %s\", result)\n self.assertEquals(result[\"foo\"], True)\n self.assertEquals(result[\"bar\"], True)\n self.assertEquals(result[\"baz\"], True)\n self.assertEquals(self.param_dict.get(\"foo\"), 100)\n self.assertEquals(self.param_dict.get(\"bar\"), 200)\n self.assertEquals(self.param_dict.get(\"baz\"), 300)", "def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def 
key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated", "def test_append_with_multiple_values_and_same_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1&a=2&a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def test_append_with_multiple_values_and_same_key_seperated(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1\" \"a=2\" \"a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def update_query(self, **updates):\r\n self._url_updates.update(updates)", "def test_update_multiple(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 14}, age=12)\n assert n_updated == 2\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 14\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def updated_query_str(request, *args):\n\n return urllib.urlencode(updated_query(request, *args))", "def update_field(current_values: List[Any], request_data: Dict[str, Any], key: str, new_values: List[Any]) -> None:\n if new_values and new_values[0] == \"-\" and current_values != new_values[1:]:\n request_data[key] = new_values[1:]\n return\n\n combined_values = current_values + list(set(new_values) - set(current_values))\n if current_values != combined_values:\n 
request_data[key] = combined_values", "def update(*args):", "def test_update(self):\n self.assertEqual(['UPDATE', 'test', 'set a=1'],\n grammar._UPDATE_EXPR.parseString(\"UPDATE test set a=1;\").asList())", "def test_update():\n payload = {'age': 99}\n sample_uuid = get_sample_id()\n response = requests.put(f'http://localhost:5000/api/persons/{sample_uuid}', json=payload)\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "def test_sqlite_update(self):\r\n self._get_good_request()\r\n\r\n # now we need to do another request with updated tag string\r\n self._get_good_request(new_tags=u\"google books icons\")\r\n\r\n search_res = self.testapp.get('/admin/results?search=icon')\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n\r\n self.assertTrue(\r\n 'icon' in search_res.body,\r\n \"We should find the new tag icon on the page: \" + search_res.body)", "def _build_update_params(self, params):", "def gen_update(params, data):\n result = {}\n for key, value in data.iteritems():\n if key in params:\n result[key] = value\n return result", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))" ]
[ "0.84186804", "0.810681", "0.7557398", "0.7449745", "0.7355413", "0.7349155", "0.7247374", "0.7078936", "0.705895", "0.66061294", "0.64534163", "0.6402692", "0.6355075", "0.6283492", "0.6176012", "0.61271197", "0.5983697", "0.5941984", "0.59279925", "0.59279925", "0.5825642", "0.5808024", "0.578756", "0.5719687", "0.5702318", "0.57015735", "0.5701113", "0.56864136", "0.5681501", "0.5675555" ]
0.8602916
0
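Taken together, the four "update" records above fix the verb's contract: each tag argument is parsed as a querystring fragment, every existing value for a key named in that fragment is replaced, untouched keys pass through, and a key repeated across separate arguments resolves to the last argument, while a key repeated inside one argument keeps all of that argument's values. A dependency-free sketch of that contract; the function name is an illustration, not the tag's real code:

from urllib.parse import parse_qsl, urlencode

def querystring_update(query_str, *fragments):
    # Illustrative reconstruction of the 'update' contract described above.
    params = parse_qsl(query_str, keep_blank_values=True)
    for fragment in fragments:
        new_pairs = parse_qsl(fragment, keep_blank_values=True)
        # Drop every old value for each key the fragment names...
        touched = {key for key, _ in new_pairs}
        params = [(k, v) for (k, v) in params if k not in touched]
        # ...then adopt the fragment's values, duplicates included.
        params.extend(new_pairs)
    return '?' + urlencode(params)

assert querystring_update('foo=foo', 'a=1&a=2') == '?foo=foo&a=1&a=2'  # one fragment keeps both
assert querystring_update('foo=foo', 'a=1', 'a=2') == '?foo=foo&a=2'   # last argument wins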
Testing {% querystring "append" %} with appending multiple values of a key
def test_append_with_multiple_values_and_same_key(self):
    rendered_result = self._render_tag(
        tag='{% querystring "append" "a=1&a=2&a=3" %}',
        query_str='a=0&&b=2&c=3')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_with_multiple_values_and_same_key_seperated(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1\" \"a=2\" \"a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_append_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"foo=baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=baz&bar=bar'))", "def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)", "def add_query_param(request, key, val):\n iri = request.get_full_path()\n uri = iri_to_uri(iri)\n return escape(replace_query_param(uri, key, val))", "def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def query_append(*query_params):\n li = []\n for qp in query_params:\n qs = urlencode_s(query_unflatten(qp))\n if qs:\n li.append(qs)\n return \"&\".join(li)", "def test_query_append(self):\n self.assertEqual(self.gmail_case.query_dict, \n {'aqs': 'chrome..69i57j0l3.9438j0', 'ie': 'UTF-8', \n 'oq': 'setter+python', 'q': 'setter+python', \n 'sourceid': 'chrome'})\n self.gmail_case.set_query_arg('Ladies + Gentlemen')\n self.assertEqual(self.gmail_case.query_dict, \n {'aqs': 'chrome..69i57j0l3.9438j0', 'ie': 'UTF-8', \n 'oq': 'setter+python', 'q': 'setter+python',\n 'Ladies + Gentlemen': None,\n 'sourceid': 'chrome'})\n self.foo_case.set_query_arg('demo_key', 'demo_value')\n self.assertEqual(self.foo_case.get_query_arg('demo_key'), 'demo_value')", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def data_append(ctx, data, key, value):\n assert isinstance(ctx, Wtp)\n assert isinstance(data, dict)\n assert isinstance(key, str)\n\n if key in str_keys:\n assert isinstance(value, str)\n elif key in dict_keys:\n assert isinstance(value, dict)\n if key == \"tags\":\n if value == \"\":\n return\n lst = data.get(key, [])\n lst.append(value)\n data[key] = lst", "def test_update_with_multiple_values(self):\n rendered_result = 
self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_deep_append(self):\n sdict = {\"bar\": {\"baz\": [1, 2]}}\n res = dictupdate.append_dict_key_value(sdict, \"bar:baz\", 42)\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42]}}, res)\n # Append with alternate delimiter\n res = dictupdate.append_dict_key_value(sdict, \"bar~baz\", 43, delimiter=\"~\")\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 43]}}, res)\n # Append to a not-yet existing list\n res = dictupdate.append_dict_key_value({}, \"foo:bar:baz\", 42)\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": [42]}}}, res)", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def query_add(*query_params):\n d = {}\n for qp in query_params:\n qp = query_unflatten(qp)\n for name, value in qp.items():\n if name in d:\n d[name].extend(value)\n else:\n d[name] = value\n return d", "def append_query_element(self, val, append=\", \"):\n self.q_str = append.join([self.q_str, val])", "def test_append_to_results(self):\n # pre conditions\n field = 'foo'\n value = 'bar'\n existing = ['baz']\n existing.append(value)\n values_dict = {field: existing}\n\n # test\n result = gen.append_to_results(field, value, values_dict)\n\n # post conditions\n expected = ['baz', 'bar', 'bar']\n self.assertEqual(result, expected)", "def querystring(data, exclude=(), **kwargs):\n items = reduce(operator.add, (\n list((k, v) for v in values)\n for k, values in data.lists() if k not in exclude\n ), [])\n\n for key, value in kwargs.items():\n items.append((key, force_text(value)))\n\n return urlencode(sorted(items))", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_no_key(self):\n rendered_result = 
self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def key_list(request):\n res = [request.param]\n for i in range(10):\n res.append(request.param + str(i))\n return res", "def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url", "def add_parameters_to_url(path, **kwargs):\n return path + \"?\" + urllib.urlencode(kwargs)", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))" ]
[ "0.8498463", "0.8341243", "0.76318073", "0.6942017", "0.6838789", "0.6705557", "0.65282977", "0.6202453", "0.6140832", "0.611182", "0.60828626", "0.59720445", "0.5939296", "0.5853891", "0.58489376", "0.58000696", "0.58000696", "0.56923723", "0.56226695", "0.5602836", "0.55750656", "0.5564172", "0.55626285", "0.5560019", "0.55438834", "0.55418754", "0.55244154", "0.5520439", "0.5463238", "0.5440756" ]
0.85816944
0
Testing {% querystring "append" %} with appending multiple values of a key as separate arguments
def test_append_with_multiple_values_and_same_key_seperated(self):
    rendered_result = self._render_tag(
        tag='{% querystring "append" "a=1" "a=2" "a=3" %}',
        query_str='a=0&&b=2&c=3')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_with_multiple_values_and_same_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1&a=2&a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_append_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"foo=baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=baz&bar=bar'))", "def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)", "def add_query_param(request, key, val):\n iri = request.get_full_path()\n uri = iri_to_uri(iri)\n return escape(replace_query_param(uri, key, val))", "def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_query_append(self):\n self.assertEqual(self.gmail_case.query_dict, \n {'aqs': 'chrome..69i57j0l3.9438j0', 'ie': 'UTF-8', \n 'oq': 'setter+python', 'q': 'setter+python', \n 'sourceid': 'chrome'})\n self.gmail_case.set_query_arg('Ladies + Gentlemen')\n self.assertEqual(self.gmail_case.query_dict, \n {'aqs': 'chrome..69i57j0l3.9438j0', 'ie': 'UTF-8', \n 'oq': 'setter+python', 'q': 'setter+python',\n 'Ladies + Gentlemen': None,\n 'sourceid': 'chrome'})\n self.foo_case.set_query_arg('demo_key', 'demo_value')\n self.assertEqual(self.foo_case.get_query_arg('demo_key'), 'demo_value')", "def query_append(*query_params):\n li = []\n for qp in query_params:\n qs = urlencode_s(query_unflatten(qp))\n if qs:\n li.append(qs)\n return \"&\".join(li)", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n 
query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def data_append(ctx, data, key, value):\n assert isinstance(ctx, Wtp)\n assert isinstance(data, dict)\n assert isinstance(key, str)\n\n if key in str_keys:\n assert isinstance(value, str)\n elif key in dict_keys:\n assert isinstance(value, dict)\n if key == \"tags\":\n if value == \"\":\n return\n lst = data.get(key, [])\n lst.append(value)\n data[key] = lst", "def test_deep_append(self):\n sdict = {\"bar\": {\"baz\": [1, 2]}}\n res = dictupdate.append_dict_key_value(sdict, \"bar:baz\", 42)\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42]}}, res)\n # Append with alternate delimiter\n res = dictupdate.append_dict_key_value(sdict, \"bar~baz\", 43, delimiter=\"~\")\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 43]}}, res)\n # Append to a not-yet existing list\n res = dictupdate.append_dict_key_value({}, \"foo:bar:baz\", 42)\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": [42]}}}, res)", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def append_query_element(self, val, append=\", \"):\n self.q_str = append.join([self.q_str, val])", "def 
test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=baz'))", "def querystring(data, exclude=(), **kwargs):\n items = reduce(operator.add, (\n list((k, v) for v in values)\n for k, values in data.lists() if k not in exclude\n ), [])\n\n for key, value in kwargs.items():\n items.append((key, force_text(value)))\n\n return urlencode(sorted(items))", "def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url", "def query_add(*query_params):\n d = {}\n for qp in query_params:\n qp = query_unflatten(qp)\n for name, value in qp.items():\n if name in d:\n d[name].extend(value)\n else:\n d[name] = value\n return d", "def add_parameters_to_url(path, **kwargs):\n return path + \"?\" + urllib.urlencode(kwargs)" ]
[ "0.86375725", "0.84450185", "0.781504", "0.68740207", "0.67290455", "0.66967934", "0.6629074", "0.62867236", "0.6149132", "0.61116165", "0.6078402", "0.6041519", "0.59463435", "0.5895472", "0.5895472", "0.58687717", "0.5769661", "0.5714091", "0.5696046", "0.56622285", "0.564351", "0.56304353", "0.5614085", "0.5587171", "0.5578828", "0.55725783", "0.55664384", "0.5530869", "0.55225027", "0.5468989" ]
0.8574146
1
Testing {% querystring "append" %} with appending new keyvalue pair
def test_append_with_new_key(self):
    rendered_result = self._render_tag(
        tag='{% querystring "append" "d=4" %}',
        query_str='a=1&b=2&c=3')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('a=1&b=2&c=3&d=4'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_with_multiple_values_and_same_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1&a=2&a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def test_append_with_multiple_values_and_same_key_seperated(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"a=1\" \"a=2\" \"a=3\" %}',\n query_str='a=0&&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=0&a=1&a=2&a=3&b=2&c=3'))", "def test_append_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"foo=baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=baz&bar=bar'))", "def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)", "def add_query_param(request, key, val):\n iri = request.get_full_path()\n uri = iri_to_uri(iri)\n return escape(replace_query_param(uri, key, val))", "def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def test_query_append(self):\n self.assertEqual(self.gmail_case.query_dict, \n {'aqs': 'chrome..69i57j0l3.9438j0', 'ie': 'UTF-8', \n 'oq': 'setter+python', 'q': 'setter+python', \n 'sourceid': 'chrome'})\n self.gmail_case.set_query_arg('Ladies + Gentlemen')\n self.assertEqual(self.gmail_case.query_dict, \n {'aqs': 'chrome..69i57j0l3.9438j0', 'ie': 'UTF-8', \n 'oq': 'setter+python', 'q': 'setter+python',\n 'Ladies + Gentlemen': None,\n 'sourceid': 'chrome'})\n self.foo_case.set_query_arg('demo_key', 'demo_value')\n self.assertEqual(self.foo_case.get_query_arg('demo_key'), 'demo_value')", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_existing_query_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='foo=foo&bar=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n 
QueryDict('foo=bar&bar=baz'))", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url", "def test_update_with_existing_query_with_two_args_override(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" \"qux=baz\" %}',\n query_str='foo=foo&bar=bar&baz=baz&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar&bar=bar&baz=baz&qux=baz'))", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def data_append(ctx, data, key, value):\n assert isinstance(ctx, Wtp)\n assert isinstance(data, dict)\n assert isinstance(key, str)\n\n if key in str_keys:\n assert isinstance(value, str)\n elif key in dict_keys:\n assert isinstance(value, dict)\n if key == \"tags\":\n if value == \"\":\n return\n lst = data.get(key, [])\n lst.append(value)\n data[key] = lst", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def query_append(*query_params):\n li = []\n for qp in query_params:\n qs = urlencode_s(query_unflatten(qp))\n if qs:\n li.append(qs)\n return \"&\".join(li)", "def test_update_with_multiple_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar=baz\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=bar=baz'))", "def test_deep_append(self):\n sdict = {\"bar\": {\"baz\": [1, 2]}}\n res = 
dictupdate.append_dict_key_value(sdict, \"bar:baz\", 42)\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42]}}, res)\n # Append with alternate delimiter\n res = dictupdate.append_dict_key_value(sdict, \"bar~baz\", 43, delimiter=\"~\")\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 43]}}, res)\n # Append to a not-yet existing list\n res = dictupdate.append_dict_key_value({}, \"foo:bar:baz\", 42)\n self.assertEqual({\"foo\": {\"bar\": {\"baz\": [42]}}}, res)", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def _add_query_param(self, route_path, name, type_, default=None):\n route = self._find_route(route_path)\n # logging.info(\"Before:\", route.dependant.query_params)\n query_param = create_query_param(name, type_, default)\n route.dependant.query_params.append(query_param)\n # logging.info(\"After:\", route.dependant.query_params)", "def append_query_element(self, val, append=\", \"):\n self.q_str = append.join([self.q_str, val])", "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def test_append_to_results(self):\n # pre conditions\n field = 'foo'\n value = 'bar'\n existing = ['baz']\n existing.append(value)\n values_dict = {field: existing}\n\n # test\n result = gen.append_to_results(field, value, values_dict)\n\n # post conditions\n expected = ['baz', 'bar', 'bar']\n self.assertEqual(result, expected)" ]
[ "0.8260465", "0.81529766", "0.8100246", "0.7122451", "0.70814383", "0.68417764", "0.65832335", "0.632912", "0.6328968", "0.63135624", "0.62243444", "0.6187808", "0.6101339", "0.6048709", "0.6048709", "0.5992231", "0.59557575", "0.59342337", "0.59126395", "0.5843746", "0.5830494", "0.58268857", "0.5821329", "0.58190894", "0.5798758", "0.5741678", "0.57412386", "0.5703812", "0.5586537", "0.55711216" ]
0.87845033
0
Testing {% querystring "remove" %} by attempting to remove a nonexisting key
def test_remove_with_key_not_in_querystring(self):
    rendered_result = self._render_tag(
        tag='{% querystring "remove" "baz" %}',
        query_str='foo=foo&bar=bar')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('foo=foo&bar=bar'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))", "def remove(self, key):", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def delete(self, key):", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def _testRemove(self):\n key = ('foo', 'bar')\n data = r'text!\\nthere'\n\n with self.cache.Lookup(key) as ref:\n self.assertFalse(ref.Exists())\n ref.AssignText(data)\n self.assertTrue(ref.Exists())\n ref.Remove()\n self.assertFalse(ref.Exists())", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def remove(self, key):\n pass", "def delete(self, keyword, key):", "def remove(self, key_name: str):\n pass", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def test_remove(self):\n pass", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% 
querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n actual = Dict(self.test_subject).remove_key([\"Py\", \"test\"])\n\n self.assertEqual(expected, actual)", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n # Test of the case that a dict is not given\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def remove_tag(request, ck, tag_name):\n\n refresh_template = request.session[constants.ACTUAL_TEMPLATE]\n\n tags = request.session[constants.ADD_TAGS]\n tag = next(el for el in tags if el.name == tag_name)\n\n if ck != \"0\":\n coding = get_object_or_404(CodingProject, id=ck)\n\n # TODO: Review this \n us = get_user(request)\n user = us\n\n # Coding must have been created by the current user and\n if coding.coder != user.id:\n raise Http404\n\n if coding.tags.filter(name=tag_name):\n cache_list = request.session[constants.REM_TAGS]\n cache_list.append(tag)\n\n tags.remove(tag)\n request.session[constants.ADD_TAGS] = tags\n\n # TODO: Centralize this?\n return HttpResponseRedirect(refresh_template)", "def delete(self,key):\n\n pass", "def remove(self, item):\n del self._dict[item]", "def test_remove_key_not_found(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"Py\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"xxx\")\n\n self.assertEqual(expected, actual)", "def test_remove(self):\n\n message = {\"method\": \"remove\",\n \"params\": {\"elem\": self.container_to_remove}}\n response = yield self._get_response(message)\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"remove\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_remove\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertNotIn(container_name, containers.keys(),\n \"Container has found\")", "def remove(name):" ]
[ "0.84152013", "0.80939305", "0.79133326", "0.79089874", "0.7750738", "0.7373129", "0.7191346", "0.6528528", "0.6507897", "0.6199616", "0.61719894", "0.6120845", "0.6088209", "0.6058557", "0.60254574", "0.6016976", "0.5947428", "0.5919038", "0.58856094", "0.5815144", "0.5778123", "0.5759228", "0.5721101", "0.57173866", "0.5692503", "0.5673428", "0.5671518", "0.56612426", "0.5659265", "0.56575245" ]
0.83562726
1
Testing {% querystring "remove" %} by removing all instances of a key
def test_remove_with_key_appearing_multiple_times(self):
    rendered_result = self._render_tag(
        tag='{% querystring "remove" "foo" %}',
        query_str='foo=foo&foo=bar&bar=bar')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def remove(self, key):", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def delete(self, key):", "def delete(self, keyword, key):", "def remove(self, key):\n pass", "def remove(self, key_name: str):\n pass", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def f_remove(self, *args):\n for arg in args:\n arg = self.f_translate_key(arg)\n if arg in self._data:\n del self._data[arg]\n else:\n raise AttributeError(\n \"Your result `%s` does not contain %s.\" % (self.name_, arg)\n )", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n actual = Dict(self.test_subject).remove_key([\"Py\", \"test\"])\n\n self.assertEqual(expected, actual)", "def test_remove_multiple_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key([\"funilrys\", \"Py\"])\n\n self.assertEqual(expected, actual)", "def 
__delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def remove():", "def dal_delete(key):\n global store\n return store.delete(urllib.quote(key))", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def discard_key_from_tag(self,tag,key):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n self.tag_dict[tag].discard(key)\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n value_tuple = (notebookname,tag,key,)\r\n db_cursor.execute(\"DELETE FROM tags_to_keys\"\r\n +\" WHERE notebook=? AND tag=?\"\r\n +\" AND keyword=?;\",\r\n value_tuple)", "def remove(self, key):\n del self[key]", "def delete(self,key):\n\n pass", "def remove(self, key: int | str):\n self.__delitem__(key)", "def remove(enforcer_dict, key):\n del enforcer_dict['f']\n assert other.keystring == 'abcde'\n assert other.valuesum == 15\n\n enforcer_dict['a'] = 2\n assert other.keystring == 'bcdea'\n assert other.valuesum == 16\n\n enforcer_dict.clear()\n assert other.keystring == ''\n assert other.valuesum == 0", "def remove(name):", "def __delitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> None:\n self.contents = {i: self.contents[i] for i in self.contents \n if i not in more_itertools.always_iterable(key)}\n return", "def remove(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h] = None" ]
[ "0.85173905", "0.8459718", "0.8431988", "0.80031836", "0.7982969", "0.7942104", "0.7938666", "0.67572826", "0.6434704", "0.63995194", "0.62388736", "0.62070173", "0.6081511", "0.6012274", "0.5948167", "0.5887117", "0.586309", "0.58515227", "0.583285", "0.579642", "0.57885164", "0.57885164", "0.57837397", "0.57502145", "0.57286483", "0.5718842", "0.5702314", "0.5691261", "0.5676697", "0.5666895" ]
0.85131925
1
Testing {% querystring "remove" %} by removing a specific keyvalue pair
def test_remove_for_specific_key_value_pairs(self):
    rendered_result = self._render_tag(
        tag='{% querystring "remove" "a=4" %}',
        query_str='a=1&a=2&a=3&a=4')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('a=1&a=2&a=3&'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def remove(self, key):", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def remove_tag(request, ck, tag_name):\n\n refresh_template = request.session[constants.ACTUAL_TEMPLATE]\n\n tags = request.session[constants.ADD_TAGS]\n tag = next(el for el in tags if el.name == tag_name)\n\n if ck != \"0\":\n coding = get_object_or_404(CodingProject, id=ck)\n\n # TODO: Review this \n us = get_user(request)\n user = us\n\n # Coding must have been created by the current user and\n if coding.coder != user.id:\n raise 
Http404\n\n if coding.tags.filter(name=tag_name):\n cache_list = request.session[constants.REM_TAGS]\n cache_list.append(tag)\n\n tags.remove(tag)\n request.session[constants.ADD_TAGS] = tags\n\n # TODO: Centralize this?\n return HttpResponseRedirect(refresh_template)", "def remove(name):", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def delete(self, key):", "def exclude_keys(value, *exclude):\n\n if not isinstance(value, QueryDict):\n raise RuntimeError(\"getquerydict should be used with QueryDict instances only (e.g. request.GET)\")\n\n value = value.copy()\n for key in exclude:\n if key in value: del value[key]\n return value", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def remove():", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def remove(self, key):\n pass", "def delete(self, keyword, key):", "def test_remove(self):\n test_remove = self.info_list.remove(\"сахар 1кг\")\n self.assertEqual(test_remove, \"сахар 1кг\")", "def remove(self, key_name: str):\n pass", "def test_remove(self):\n\n message = {\"method\": \"remove\",\n \"params\": {\"elem\": self.container_to_remove}}\n response = yield self._get_response(message)\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"remove\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_remove\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertNotIn(container_name, containers.keys(),\n \"Container has found\")", "def remove(self, item):\n del self._dict[item]", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def remove_item_page(request):\n validate(instance=request.body, schema=item_schema_remove)\n body = json.loads(request.body)\n Item.remove_item(body['item_id'])\n return HttpResponse('success')", "def remove(request):\n\tID = request.GET.get('id',False)\n\n\tif not ID:\n\t\tresponse = {\"error\":\"id not entered\"}\n\telse:\n\t\tID = str(ID)\n\t\tk = 0\n\t\tfor i,task in enumerate(todo['task']):\n\t\t\ttask_id = task.get('id',False)\n\t\t\tif ID == task_id:\n\t\t\t\tk += 1\n\t\t\t\tidx = i\n\t\tif k == 0:\n\t\t\tresponse = {\"error\":\"id not fount\"}\n\t\telse:\n\t\t\tresponse = todo['task'].pop(idx)\n\n\treturn JsonResponse(response)", "def delete_parameter(request, parameter, **_kwargs):\n pass" ]
[ "0.86211497", "0.86141086", "0.8397185", "0.8291769", "0.8162063", "0.81378734", "0.79093105", "0.65594715", "0.6130791", "0.60870177", "0.5993961", "0.56719804", "0.56274676", "0.5627308", "0.56103635", "0.56000274", "0.5595212", "0.5594768", "0.5589343", "0.55880225", "0.55753636", "0.5552674", "0.5548485", "0.5530267", "0.55264044", "0.5523129", "0.55210656", "0.5509253", "0.5506551", "0.5503084" ]
0.8716594
0
Testing {% querystring "remove" %} by removing a value with no key
def test_remove_with_no_key(self):
    rendered_result = self._render_tag(
        tag='{% querystring "remove" "=foo" %}',
        query_str='foo=foo&foo=bar&baz=baz&=foo')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]),
                     QueryDict('foo=foo&foo=bar&baz=baz'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def remove(self, key):", "def exclude_keys(value, *exclude):\n\n if not isinstance(value, QueryDict):\n raise RuntimeError(\"getquerydict should be used with QueryDict instances only (e.g. 
request.GET)\")\n\n value = value.copy()\n for key in exclude:\n if key in value: del value[key]\n return value", "def remove_value(self, key: str) -> None:\n raise NotImplementedError", "def test_delete(self):\n mute_map = MutableMap(**VALUE)\n del mute_map.str_val\n del mute_map['dict_val']\n\n assert not mute_map.get('str_val')\n assert not mute_map.get('dict_val')", "def discard_value(collection, key, value):\n try:\n values = collection[key]\n except KeyError:\n pass\n else:\n values.discard(value)\n if not values:\n del collection[key]", "def must_remove(self, tag_name, tag_value):\n return self._ruleset[tag_name][tag_value].get(self.REMOVE_KEY, False)", "def remove(self, value):\n pass", "def test_remove_key_not_dict(self):\n\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def remove(self, key):\n pass", "def remove(self, value): # real signature unknown; restored from __doc__\n pass", "def delete(self, key):", "def removekwd(header, kwd):\n if kwd in header.keys():\n header.remove(kwd)\n return", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def remove():", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def test_remove_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)\n\n # Test of the case that a dict is not given\n expected = None\n actual = Dict([\"Hello\", \"World!\"]).remove_key(\"Py\")\n\n self.assertEqual(expected, actual)", "def remove(query):\n # type: (str) -> bool\n if not query or not SEARCH_SAVED:\n return False\n searches = retrieve()\n if query in searches:\n searches.remove(query)\n save(searches)\n return True\n return False" ]
[ "0.8768574", "0.8608548", "0.85124594", "0.8144512", "0.80228597", "0.8015881", "0.7719858", "0.6615589", "0.64976746", "0.64727014", "0.6124275", "0.6086564", "0.5976051", "0.58541024", "0.56783885", "0.5660003", "0.5650449", "0.5639908", "0.5620342", "0.55923486", "0.5554788", "0.5510519", "0.5482272", "0.54772055", "0.54722565", "0.54647267", "0.54590756", "0.5454134", "0.54353344", "0.54273707" ]
0.8808378
0
Testing {% querystring "remove" %} by removing multiple specific keyvalue pairs
def test_remove_with_multiple_specific_values(self):
    rendered_result = self._render_tag(
        tag='{% querystring "remove" "foo=1" "foo=2" %}',
        query_str='foo=1&foo=2&foo=3')

    self.assertTrue(rendered_result.startswith('?'))
    self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))", "def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))", "def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()", "def _remove_data(things, lst_remove=None):\n\n for data in things:\n data.pop(\"_sa_instance_state\", None)\n data.pop(\"user_id\", None)\n\n if lst_remove is not None:\n for str_remove in lst_remove:\n if str_remove in data:\n data.pop(str_remove, None)\n\n return things", "def exclude_keys(value, *exclude):\n\n if not isinstance(value, QueryDict):\n raise RuntimeError(\"getquerydict should be used with QueryDict instances only (e.g. 
request.GET)\")\n\n value = value.copy()\n for key in exclude:\n if key in value: del value[key]\n return value", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def f_remove(self, *args):\n for arg in args:\n arg = self.f_translate_key(arg)\n if arg in self._data:\n del self._data[arg]\n else:\n raise AttributeError(\n \"Your result `%s` does not contain %s.\" % (self.name_, arg)\n )", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def test_remove_nones_with_list_of_strings():\n payload = asdict(\n GetConfigurationPayload(key=[\"ClockAlignedDataInterval\", \"ConnectionTimeOut\"])\n )\n\n assert remove_nones(payload) == {\n \"key\": [\"ClockAlignedDataInterval\", \"ConnectionTimeOut\"]\n }", "def test_handle_removals_remove_field_names(self):\n original_fields = self.form.fields\n fields = original_fields.copy()\n remove_names = ['second', 'last']\n expected_fields = {name: field for name, field in fields.items() if name not in remove_names}\n self.form.removed_fields = {}\n self.form.remove_field_names = remove_names\n result = self.form.handle_removals(fields)\n\n self.assertEqual(len(original_fields), len(result) + len(remove_names))\n self.assertEqual(len(remove_names), len(self.form.removed_fields))\n self.assertEqual(0, len(self.form.remove_field_names))\n self.assertDictEqual(expected_fields, result)\n self.assertIs(fields, result)", "def remove(self, key):", "def test_remove_multiple_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key([\"funilrys\", \"Py\"])\n\n self.assertEqual(expected, actual)", "def modify_search(add=[], remove=[]):\n\n query = request.args.get('q', '').split()\n query = [x.strip() for x in query if x.strip()]\n\n for word in remove:\n if word in query:\n query.remove(word)\n\n for word in add:\n if word and word not in query:\n query.append(word)\n\n return \" \".join(query)", "def _remove_keys(results: dict, remove: list) -> dict:\n removed = {}\n for key, val in results.items():\n if key not in remove:\n removed[key] = val\n return removed", "def remove():", "def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):\n\n if isinstance(parameterlist, (six.text_type, bytes)):\n parameterlist = [parameterlist]\n url, fragment = urldefrag(url)\n base, _, query = url.partition('?')\n seen = set()\n querylist = []\n for ksv in query.split(sep):\n if not ksv:\n continue\n k, _, _ = ksv.partition(kvsep)\n if unique and k in seen:\n continue\n elif remove and k in parameterlist:\n continue\n elif not remove and k not in parameterlist:\n continue\n else:\n querylist.append(ksv)\n seen.add(k)\n url = '?'.join([base, sep.join(querylist)]) if querylist else base\n if 
keep_fragments:\n url += '#' + fragment\n return url", "def filter_checkpoint_parameter_by_list(origin_dict, param_filter):\n for key in list(origin_dict.keys()):\n for name in param_filter:\n if name in key:\n print(\"Delete parameter from checkpoint: \", key)\n del origin_dict[key]\n break", "def test_with_updating_multiple_values_of_a_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1&a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=1&a=2'))", "def remove(self, urls):\n path = \"authSettings/exemptedUrls?action=REMOVE_FROM_LIST\"\n return self._session.post(path, urls)", "def get_query_string(p, new_params=None, remove=None):\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n\n for r in remove:\n for k in p.keys():\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if k in p and v is None:\n del p[k]\n elif v is not None:\n p[k] = v\n return mark_safe(\n '?' + '&amp;'.join(\n [u'%s=%s' % (k, v) for k, v in p.items()]\n ).replace(' ', '%20')\n )", "def remove_from_values(values, to_remove):\n to_keep = []\n for x in to_remove:\n if '!' in x:\n to_keep.append(x.replace(\"!\", \"\"))\n\n if len(to_keep) == 0:\n for x in to_remove:\n del values[x]\n else:\n tmp_values = values.copy()\n for key in tmp_values.keys():\n if key not in to_keep:\n del values[key]", "def test_append_with_new_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"d=4\" %}',\n query_str='a=1&b=2&c=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&c=3&d=4'))", "def remove_var(b, exclude):\n return dict((k, v) for k, v in b.items() if param_name(k) not in exclude)", "def remove_tag(args):" ]
[ "0.8700103", "0.86342984", "0.84553623", "0.83464617", "0.8266275", "0.8107934", "0.80839235", "0.6472443", "0.6022613", "0.59204817", "0.5677038", "0.5628828", "0.562024", "0.5582078", "0.5559137", "0.55267173", "0.5524983", "0.55198497", "0.55075717", "0.55037594", "0.55020857", "0.549432", "0.54502344", "0.54440975", "0.54420227", "0.54417175", "0.54401934", "0.54362637", "0.543291", "0.54114306" ]
0.8674158
1
Return a rendered template tag using a query string. This will render a ``querystring`` template using the provided template tag, with autoescaping turned off, and with the given query string as it would be provided in a URL.
def _render_tag(self, tag, query_str):
    t = Template('{%% load djblets_utils %%}'
                 '{%% autoescape off %%}%s{%% endautoescape %%}' % tag)

    request = HttpRequest()

    if query_str:
        request.GET = QueryDict(query_str)

    return t.render(Context({
        'request': request,
    })).replace('&amp;', '&')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def template_string(template, **kwargs):\n\n temp = Template(template)\n return temp.render(**kwargs)", "def render_str(self, template, **params):\n return render_str(template, **params)", "def querystring(parser, token):\r\n bits = token.split_contents()\r\n tag = bits.pop(0)\r\n updates = token_kwargs(bits, parser)\r\n # ``bits`` should now be empty of a=b pairs, it should either be empty, or\r\n # have ``without`` arguments.\r\n if bits and bits.pop(0) != \"without\":\r\n raise TemplateSyntaxError(\"Malformed arguments to '%s'\" % tag)\r\n removals = [parser.compile_filter(bit) for bit in bits]\r\n return QuerystringNode(updates, removals)", "def render_str(self, template, **params):\n tmp = JINJA_ENV.get_template(template)\n return tmp.render(params)", "def render_string(self, template: str, **vars) -> str:", "def render_template(self, string, context=None):\n context = context or {}\n context = Context(context)\n return Template(string).render(context)", "def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}", "def render(self, template: str, **vars) -> str:", "def render_string(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "def test_update_with_tag_existing_query(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=bar\" %}',\n query_str='a=1&b=2')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&b=2&foo=bar'))", "def render_str(self, template_name, **params):\n template = jinja_env.get_template(template_name)\n return template.render(params)", "def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))", "def renderstr_from_template(self, template, args=None):\n renderedtext = template.render_string(args)\n return renderedtext", "def render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "def test_update_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def render_str(template, **params):\n t = env.jinja_env.get_template(template)\n return t.render(params)", "def render_str(template, **params):\n\n template_jinja = jinja_env.get_template(template)\n return template_jinja.render(params)", "def _render_str(self, template, ** params):\n\n for key in params:\n if(isinstance(params[key], str)):\n params[key] = params[key].decode('utf-8')\n if(isinstance(params[key], dict)):\n for sub_key in params[key]:\n if(isinstance(params[key][sub_key], str)):\n params[key][sub_key] = params[key][sub_key].decode('utf-8')\n t = constants.JINJA_ENV.get_template(template)\n return t.render(params)", "def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n 
self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))", "def test_update_with_querystring_key_overide(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"a=1\" \"a=2\" %}',\n query_str='foo=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&a=2'))", "def render_string(self, template_name, **kwargs):\n raise NotImplementedError()", "def render(template_string, dictionary=None):\n context = Context(dictionary)\n return Template(template_string).render(context)", "def get_query(self, query_args):\n\n query_template = Template(self.query_spec.query_template)\n query = query_template.substitute(**query_args)\n logger.debug(f\"Query: {query}\")\n return query", "def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))", "def test_update_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"=foo\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('=foo'))", "def test_append_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"append\" \"foo=baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=baz&bar=bar'))", "def test_update_with_empty_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"update\" \"foo=\" %}',\n query_str='')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo='))", "def render_func(raw_str: str) -> str:\n try:\n rendered_str = raw_str.format(**live_context)\n except KeyError as err:\n raise SQLTemplaterError(\n \"Failure in Python templating: {}. Have you configured your \"\n \"variables? https://docs.sqlfluff.com/en/stable/\"\n \"configuration.html#templating-configuration\".format(err)\n )\n return rendered_str", "def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)", "def url_replace(context, **kwargs):\n query = context['request'].GET.dict()\n query.update(kwargs)\n return urlencode(query)" ]
[ "0.5957292", "0.58817935", "0.57983536", "0.5778623", "0.57117337", "0.5596593", "0.55515957", "0.55368096", "0.5526971", "0.54989415", "0.54698455", "0.54629296", "0.5458646", "0.53809476", "0.5372585", "0.536746", "0.53206545", "0.5303815", "0.5260162", "0.5234884", "0.5216653", "0.5131473", "0.51220083", "0.509076", "0.507286", "0.5063236", "0.50606006", "0.503361", "0.50232613", "0.50232613" ]
0.78619885
0
Return a filter key and value if an exact filter exists for name.
def get_exact_filter_by_name(self, name):
    for entry in self.filters:
        if (entry['type'] == 'filter' and entry['name'] == name and
                entry['comparator'] == 'equals'):
            return entry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def manifest_filter(self, name):\n if not name:\n return self._data.index\n else:\n name = self._verify_filter_name(name, None)\n if not self.is_filter(name):\n raise KeyError('{} is no valid filter-variable.'.format(name))\n return self.take({name: 0})", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def filter_by(self, key: str, *args, **kwargs):\n filter_ = self.filters.get(key)\n if not filter_:\n raise ValueError(key)\n return filter_(*args, **kwargs)", "def search_product_by_name(name, filters):\n return store_handler.search_product_by_name(name, filters)", "def get_filters_values_key(self, project, metric_name, f):\n return u\"{0}-metrics-filter-values:{1}:{2}\".format(project,\n to_unicode(metric_name),\n to_unicode(f))", "def callfilter(self, name, value):\n if name in self._filters:\n return self._filters[name](value)\n else:\n raise Error(\"No such filter: {0}\".format(name))", "def get_by(cls, name, value, keys_only=None):\n return cls.query(getattr(cls, name) == value).get(keys_only=keys_only)", "def match_key(name, func, fallback=None, default=None):\n return key_predicate(name, KeyExtractor(func), fallback, default)", "def lazy_match(name, key_value_tuples):\r\n result = []\r\n for (k, v) in key_value_tuples:\r\n if k.lower().find(name.lower()) == 0:\r\n result.append((k, v))\r\n if len(result) == 0:\r\n print \"%s does not match any options:\" % name\r\n for k, _v in key_value_tuples:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n if len(result) > 1:\r\n print \"%s matched multiple options:\" % name\r\n for k, _v in result:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n return result", "def _filter_from_dict(cls, nm, val):\n #Any necessary filtering place here.\n return val", "def is_filtered(self, key, filter_values):\n return str(key[-1]) in filter_values", "def get_filter_pillar(filter_name, pillar_key=\"acl\", pillarenv=None, saltenv=None):\n pillar_cfg = _get_pillar_cfg(pillar_key, pillarenv=pillarenv, saltenv=saltenv)\n return _lookup_element(pillar_cfg, filter_name)", "def _extract_lookup(self, key):\n parts = key.rsplit(\"__\", 1)\n\n if len(parts) > 1 and parts[1] in operators:\n op = parts[1]\n attribute = parts[0]\n else:\n # 'exact' is the default lookup if there was no explicit comparison op in `key`\n op = \"exact\"\n attribute = key\n\n # Construct and assign the lookup class as a filter criteria\n return attribute, self.get_lookup(op)", "def call_filter(\n self,\n name: str,\n value: t.Any,\n args: t.Optional[t.Sequence[t.Any]] = None,\n kwargs: t.Optional[t.Mapping[str, t.Any]] = None,\n context: t.Optional[Context] = None,\n eval_ctx: t.Optional[EvalContext] = None,\n ) -> t.Any:\n return self._filter_test_common(\n name, value, args, kwargs, context, eval_ctx, True\n )", "def get_filter_name(self):\n pass", "def _s_filter(cls, arg):\n return cls.query.filter_by(name=arg)", "def get_filters_names_key(self, project, metric_name):\n return 
u\"{0}-metrics-filters:{1}\".format(project, to_unicode(metric_name))", "def get(self, name):\n\n # Fast path: check for a non-conditional param or for a conditional param\n # that was defined in the current scope.\n full_cond_name = self._get_name(name)\n if full_cond_name in self.values:\n if self._conditions_are_active():\n return self.values[full_cond_name]\n else:\n raise ValueError(\n 'Conditional parameter {} is not currently active'.format(\n full_cond_name))\n\n # Check for any active conditional param.\n found_inactive = False\n full_name = self._get_name(name, include_cond=False)\n for name, val in self.values.items():\n hp_parts = self._get_name_parts(name)\n hp_scopes = hp_parts[:-1]\n hp_name = hp_parts[-1]\n hp_full_name = self._get_name(\n hp_name,\n scopes=hp_scopes,\n include_cond=False)\n if full_name == hp_full_name:\n if self._conditions_are_active(hp_scopes):\n return val\n else:\n found_inactive = True\n\n if found_inactive:\n raise ValueError(\n 'Conditional parameter {} is not currently active'.format(\n full_cond_name))\n else:\n raise ValueError(\n 'Unknown parameter: {}'.format(full_name))", "async def get_filter(self, **kwargs: Any) -> str:\n return self._telescope.filter_name", "def get(self, name, **valuefilter):\n if not valuefilter:\n valuefilter = self.valuefilter\n varobj = Variable(name, **valuefilter)\n value = varobj.get(gid=self.gid)\n return value", "def terraform_output_filter(filter, payload):\n if filter in payload:\n return payload[filter]['value']\n else:\n return None", "def test_filter_one_key():\n data = [\n {\n \"name\": \"Bill\",\n \"last_name\": \"Gilbert\",\n \"occupation\": \"was here\",\n \"type\": \"person\",\n },\n {\"is_dead\": True, \"kind\": \"parrot\", \"type\": \"bird\", \"name\": \"polly\"},\n ]\n\n actual_result = make_filter(last_name=\"Gilbert\").apply(data)\n expected_result = [data[0]]\n assert actual_result == expected_result", "def format_search_filter(self, term: event_search.SearchFilter) -> Optional[WhereType]:\n name = term.key.name\n\n converted_filter = self.convert_search_filter_to_condition(\n event_search.SearchFilter(\n # We want to use group_id elsewhere so shouldn't be removed from the dataset\n # but if a user has a tag with the same name we want to make sure that works\n event_search.SearchKey(\"tags[group_id]\" if name == \"group_id\" else name),\n term.operator,\n term.value,\n )\n )\n return converted_filter if converted_filter else None", "def filter_names(self, qs, name, value):\n return qs.filter(name__in=value)", "def lookup(input_field, input_val, output_field):\n l = list(filter(lambda x : x[input_field] == input_val, data))\n if len(l) != 0:\n return l[0][output_field]\n print(\"No entry found for \" + input_field + \": \" + input_val)\n return \"\"", "def get_el_by_name(items: List[Dict[str, Any]], name: str) -> Dict[str, Any]:\n for item in items:\n if item[\"name\"] == name:\n return item\n print(\"error, key name not found by value\", name, \"in list: \", items)\n sys.exit(1)", "def lookup(scopes, name):\n # type: (Scopes[T], str) -> Optional[T]\n\n for scope in scopes:\n for key, val in scope:\n if key == name:\n return val\n return None" ]
[ "0.62365323", "0.61823237", "0.61162287", "0.61162287", "0.61162287", "0.61068785", "0.56777346", "0.55669785", "0.55378616", "0.553224", "0.55188286", "0.55173266", "0.5495687", "0.54905736", "0.54889935", "0.5462702", "0.5452204", "0.53600377", "0.5333365", "0.52472883", "0.52384907", "0.52202004", "0.52017957", "0.5201121", "0.51995134", "0.51910913", "0.51822937", "0.51463866", "0.5142371", "0.51305425" ]
0.7875014
0
Set a limit to indicate the list should be truncated.
def set_limit(self, limit, truncated=False):
    self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated}
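A minimal sketch of the convention this setter implements. Only the {'limit', 'type', 'truncated'} dict shape comes from the record; the PaginatedList wrapper and its render method are hypothetical, written to show one plausible way the flag could be consumed.

# Hypothetical wrapper illustrating the {'limit', 'type', 'truncated'} convention.
class PaginatedList:
    def __init__(self, items):
        self.items = items
        self.limit = None

    def set_limit(self, limit, truncated=False):
        self.limit = {'limit': limit, 'type': 'limit', 'truncated': truncated}

    def render(self):
        if self.limit is None:
            return list(self.items)
        n = self.limit['limit']
        head = self.items[:n]
        if self.limit['truncated'] and len(self.items) > n:
            head.append('...')  # marker signalling the list was cut short
        return head


lst = PaginatedList(['a', 'b', 'c', 'd'])
lst.set_limit(2, truncated=True)
print(lst.render())  # ['a', 'b', '...']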
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_limit(self, limit):\n self.limit = limit\n self._prune()", "def limit(self, limit):\n self._limit = limit", "def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n\n self._limit = limit", "def limit(self, limit):\n self._limit = limit\n return self", "def limit(self, limit):\n self._evaluated = False\n self._limit = limit\n return self", "def limit(self, limit):\n self._limit = limit\n\n return self", "def limit(self, limit):\n\n self._limit = limit\n return self", "def set_limit(self, errors):\n self.limit = errors", "def truncation(self, truncate: int) -> None:\n self._truncate = truncate", "def set_max_record_limit(self, limit):\n self.max_record_limit = limit", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def limit(self, amount):\n self._limit = amount\n return self", "def limit(self, count):\n self._limit = count\n return self", "def SetLimit(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetLimit(self, *args)", "def set_Limit(self, value):\n super(DescribeEvaluationsInputSet, self)._set_input('Limit', value)", "def limit(self, limit):\n if limit is None:\n return self\n\n self.query = self.query.limit(limit)\n self._has_limit = True\n return self", "def strict_limit(self, strict_limit):\n\n self._strict_limit = strict_limit", "def set_custom_readings_persistence_limit(self, limit: int): # type: ignore\n self.readings_limit = limit\n return self", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def limit(self, custom_limit):\n # NOTE(gibi): this operation needs escalated privileges (e.g. admin)\n # as the owner of the app cannot set its own app's limits. But\n # authorization is out of scope.\n self._limit = custom_limit", "def limit(self, lim: float):\n if self.mag() > lim:\n self.values = tuple(self.norm()*lim)\n return self", "def set_timelimit(self, timelimit):\n self._timelimit = timelimit", "def limit(self, row_count, offset=0):\n self._limit = (row_count, offset)\n return self", "def limit(self, row_count):\n self._limit = row_count\n return self" ]
[ "0.75690305", "0.74136245", "0.7268453", "0.7227085", "0.7227085", "0.7227085", "0.7150918", "0.7135207", "0.70293975", "0.701623", "0.6900221", "0.68999344", "0.6751625", "0.6742927", "0.6742424", "0.66420597", "0.6582306", "0.64915013", "0.6447758", "0.6411383", "0.6300985", "0.6264307", "0.6264307", "0.6264307", "0.6264307", "0.6263859", "0.6187674", "0.6148729", "0.61480075", "0.6144631" ]
0.79677534
0
Builds and returns a QueryStrategy using a feature extractor and a base_df
def build_query_strategy(sent_df, col_names):
    # type: (DataFrame, ColumnNames) -> QueryStrategy
    init_extractor = SynStateALHeuristic.build_feature_extractor(sent_df, col_names)
    combined_features = init_extractor.transform(sent_df, col_names)
    return HintSVM(TextDataset(sent_df, col_names, None, features=combined_features),
                   Cl=0.01, p=0.8)
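The record above wires a HintSVM query strategy (from the libact active-learning library) around a project-specific TextDataset. Since SynStateALHeuristic and TextDataset are not public APIs, the sketch below substitutes a generic pool-based loop with an uncertainty-sampling stand-in on scikit-learn, to show what any such query strategy does: pick the next unlabeled example worth labeling.

# Generic pool-based active-learning loop; a stand-in for the HintSVM strategy
# above. All names and data here are illustrative.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))             # pool of feature vectors
y = (X[:, 0] + X[:, 1] > 0).astype(int)   # hidden true labels (plays the oracle)

labeled = list(rng.choice(len(X), size=10, replace=False))
unlabeled = [i for i in range(len(X)) if i not in labeled]

clf = LogisticRegression()
for _ in range(20):
    clf.fit(X[labeled], y[labeled])
    # Uncertainty sampling: query the pool point closest to the decision boundary.
    probs = clf.predict_proba(X[unlabeled])[:, 1]
    ask = unlabeled[int(np.argmin(np.abs(probs - 0.5)))]
    labeled.append(ask)       # oracle supplies y[ask]
    unlabeled.remove(ask)

clf.fit(X[labeled], y[labeled])
print('accuracy on pool:', clf.score(X, y))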
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_query_strategy(utility_measure: Callable, selector: Callable) -> Callable:\n def query_strategy(classifier: BaseEstimator, X: modALinput) -> Tuple:\n utility = utility_measure(classifier, X)\n query_idx = selector(utility)\n return query_idx, X[query_idx]\n\n return query_strategy", "def construct_training_data_query(self, operation='training'):\n # FUTURE: make dollar return target/features dynamic\n if self.feature_minutes_list == None or self.trade_window_list == None:\n raise Exception(\"To construct training data query, the optional feature_minutes_list and trade_window_list attributes must be set!\")\n \n feature_col_list = []\n target_col_list = []\n base_ctes_list = []\n feature_cte_list = []\n final_col_list = []\n interaction_features_list = []\n join_conditions_list = []\n\n # Limit rows returned when pulling scoring features\n limit_where_clause = ''\n limit_clause = ''\n if operation == 'scoring':\n limit_minutes = max(self.feature_minutes_list) + 10\n limit_clause = f'LIMIT {limit_minutes}'\n # trying to move away from the where clause - limits are faster\n limit_trade_minute = (time.time() / 60) - limit_minutes - (5*60) \n limit_where_clause = f'AND trade_minute > {limit_trade_minute}'\n elif self.training_period is not None:\n limit_minutes = self.training_period + max(self.feature_minutes_list)\n limit_clause = f'LIMIT {limit_minutes}'\n print(f\"Training data query being limited to the first {limit_minutes} minutes. Training period plus {max(self.feature_minutes_list)} (max feature interval)\")\n # trying to move away from the where clause - limits are faster\n limit_trade_minute = (time.time() / 60) - self.training_period - (5*60)\n limit_where_clause = f'AND trade_minute > {limit_trade_minute}'\n\n\n for pair_type, coin_pair in self.coin_pair_dict.items():\n \"\"\"\n pair_type: 'alt', 'target'\n \"\"\"\n base_features_list = []\n base_ctes_list.append(f\"\"\"\n {pair_type}_{coin_pair}_end_orderbook AS (\n SELECT trade_minute - 1 AS lag_trade_minute, * \n FROM binance.orderbook\n WHERE coin_pair = '{coin_pair}'\n ORDER BY trade_minute DESC \n {limit_clause}\n ),\n {pair_type}_{coin_pair}_beg_orderbook AS (\n SELECT * \n FROM binance.orderbook\n WHERE coin_pair = '{coin_pair}'\n ORDER BY trade_minute DESC \n {limit_clause}\n ),\n {pair_type}_{coin_pair}_candlesticks AS (\n SELECT *\n FROM binance.candledicks c\n WHERE coin_pair = '{coin_pair}'\n ORDER BY trade_minute DESC \n {limit_clause}\n )\"\"\")\n # Base target variable features\n if pair_type == 'target':\n base_features_list.append(f\"\"\"\n c.close_datetime AS {coin_pair}_trade_close_datetime\n , extract(isodow from c.close_datetime) as trade_day_of_week\n , date_part('hour', c.close_datetime) as trade_hour\n , c.close_datetime::date - current_date as days_old\n \"\"\")\n final_col_list.append(f\"\"\"\n {coin_pair}_trade_close_datetime\n , trade_day_of_week\n , trade_hour\n , days_old\n \"\"\")\n feature_col_list.extend(['trade_day_of_week', 'trade_hour', 'days_old'])\n # Base features\n base_features_list.append(f\"\"\"\n c.trade_minute AS {coin_pair}_trade_minute\n , quote_asset_volume as {coin_pair}_quote_asset_volume\n , taker_sell_volume_percentage * 100 AS {coin_pair}_taker_sell_volume_perc_of_total\n , trade_count as {coin_pair}_trade_count\n , o_end.bids_cum_50000_weighted_avg - o_beg.bids_cum_50000_weighted_avg AS {coin_pair}_crnt_interval_bids_50000_price_diff\n , o_end.bids_cum_50000_weighted_avg - o_end.asks_cum_50000_weighted_avg AS {coin_pair}_crnt_interval_bids_v_asks_50000_price_diff \n 
, o_end.bids_cum_50000_weighted_std - o_beg.bids_cum_50000_weighted_std AS {coin_pair}_crnt_interval_bids_50000_std_diff\n , o_end.bids_cum_50000_weighted_std - o_end.asks_cum_50000_weighted_std AS {coin_pair}_crnt_interval_bids_v_asks_50000_std_diff\n , o_end.bids_cum_50000_weighted_std / (o_end.bids_cum_50000_weighted_std + o_end.asks_cum_50000_weighted_std) AS {coin_pair}_crnt_bids_50000_std_perc_of_total\n , o_end.bids_cum_200000_weighted_std / (o_end.bids_cum_200000_weighted_std + o_end.asks_cum_200000_weighted_std) AS {coin_pair}_crnt_bids_200000_std_perc_of_total\n , (o_end.bids_cum_200000_weighted_std / (o_end.bids_cum_200000_weighted_std + o_end.asks_cum_200000_weighted_std) \n + LEAD(o_end.bids_cum_200000_weighted_std, 1) OVER (ORDER BY c.trade_minute DESC) / (LEAD(o_end.bids_cum_200000_weighted_std, 1) OVER (ORDER BY c.trade_minute DESC) + LEAD(o_end.asks_cum_200000_weighted_std, 1) OVER (ORDER BY c.trade_minute DESC)) \n + LEAD(o_end.bids_cum_200000_weighted_std, 2) OVER (ORDER BY c.trade_minute DESC) / (LEAD(o_end.bids_cum_200000_weighted_std, 2) OVER (ORDER BY c.trade_minute DESC) + LEAD(o_end.asks_cum_200000_weighted_std, 2) OVER (ORDER BY c.trade_minute DESC))\n + LEAD(o_end.bids_cum_200000_weighted_std, 3) OVER (ORDER BY c.trade_minute DESC) / (LEAD(o_end.bids_cum_200000_weighted_std, 3) OVER (ORDER BY c.trade_minute DESC) + LEAD(o_end.asks_cum_200000_weighted_std, 3) OVER (ORDER BY c.trade_minute DESC))\n + LEAD(o_end.bids_cum_200000_weighted_std, 4) OVER (ORDER BY c.trade_minute DESC) / (LEAD(o_end.bids_cum_200000_weighted_std, 4) OVER (ORDER BY c.trade_minute DESC) + LEAD(o_end.asks_cum_200000_weighted_std, 4) OVER (ORDER BY c.trade_minute DESC))\n ) / 5 AS {coin_pair}_bids_200000_std_perc_of_total_avg\n \"\"\")\n final_col_list.append(f\"\"\"\n {coin_pair}_trade_minute\n , {coin_pair}_quote_asset_volume\n , {coin_pair}_taker_sell_volume_perc_of_total\n , {coin_pair}_trade_count\n , {coin_pair}_crnt_interval_bids_50000_price_diff\n , {coin_pair}_crnt_interval_bids_v_asks_50000_price_diff\n , {coin_pair}_crnt_interval_bids_50000_std_diff\n , {coin_pair}_crnt_interval_bids_v_asks_50000_std_diff\n , {coin_pair}_crnt_bids_50000_std_perc_of_total\n , {coin_pair}_crnt_bids_200000_std_perc_of_total\n , {coin_pair}_bids_200000_std_perc_of_total_avg\n \"\"\")\n feature_col_list.extend([\n f'{coin_pair}_quote_asset_volume'\n , f'{coin_pair}_taker_sell_volume_perc_of_total'\n , f'{coin_pair}_trade_count'\n , f'{coin_pair}_crnt_interval_bids_50000_price_diff'\n , f'{coin_pair}_crnt_interval_bids_v_asks_50000_price_diff'\n , f'{coin_pair}_crnt_interval_bids_50000_std_diff'\n , f'{coin_pair}_crnt_interval_bids_v_asks_50000_std_diff'\n , f'{coin_pair}_crnt_bids_50000_std_perc_of_total'\n , f'{coin_pair}_crnt_bids_200000_std_perc_of_total'\n , f'{coin_pair}_bids_200000_std_perc_of_total_avg'\n ])\n \n # Lag features for every interval configured at runtime\n for interval in self.feature_minutes_list:\n interval_list = []\n base_features_list.append(f\"\"\"\n ((quote_asset_volume - LEAD(quote_asset_volume, {interval}) OVER (ORDER BY c.trade_minute DESC)) \n / LEAD(quote_asset_volume, {interval}) OVER (ORDER BY c.trade_minute DESC)) * 100 AS prev_{interval}_{coin_pair}_quote_asset_volume_perc_chg\n , ((taker_sell_volume_percentage - LEAD(taker_sell_volume_percentage, {interval}) OVER (ORDER BY c.trade_minute DESC)) \n / LEAD(taker_sell_volume_percentage, {interval}) OVER (ORDER BY c.trade_minute DESC)) * 100 AS prev_{interval}_{coin_pair}_taker_sell_volume_perc_of_total_chg\n , 
((trade_count::float - LEAD(trade_count::float, {interval}) OVER (ORDER BY c.trade_minute DESC)) \n / LEAD(trade_count::float, {interval}) OVER (ORDER BY c.trade_minute DESC)) * 100 AS prev_{interval}_{coin_pair}_trade_count_perc_chg\n , ((o_end.bids_cum_50000_weighted_avg - LEAD(o_end.bids_cum_50000_weighted_avg, {interval}) OVER (ORDER BY c.trade_minute DESC)) \n / LEAD(o_end.bids_cum_50000_weighted_avg, {interval}) OVER (ORDER BY c.trade_minute DESC)) * 100 AS prev_{interval}_{coin_pair}_bids_50000_perc_chg\n , ((o_end.bids_cum_50000_weighted_std - LEAD(o_end.bids_cum_50000_weighted_std, {interval}) OVER (ORDER BY c.trade_minute DESC)) \n / LEAD(o_end.bids_cum_50000_weighted_std, {interval}) OVER (ORDER BY c.trade_minute DESC)) * 100 AS prev_{interval}_{coin_pair}_bids_50000_std_chg\n \"\"\")\n final_col_list.append(f\"\"\"\n prev_{interval}_{coin_pair}_quote_asset_volume_perc_chg\n , prev_{interval}_{coin_pair}_taker_sell_volume_perc_of_total_chg\n , prev_{interval}_{coin_pair}_trade_count_perc_chg\n , prev_{interval}_{coin_pair}_bids_50000_perc_chg\n , prev_{interval}_{coin_pair}_bids_50000_std_chg\n \"\"\") \n feature_col_list.extend([\n f'prev_{interval}_{coin_pair}_quote_asset_volume_perc_chg'\n ,f'prev_{interval}_{coin_pair}_taker_sell_volume_perc_of_total_chg'\n ,f'prev_{interval}_{coin_pair}_trade_count_perc_chg'\n ,f'prev_{interval}_{coin_pair}_bids_50000_perc_chg'\n ,f'prev_{interval}_{coin_pair}_bids_50000_std_chg'\n ])\n \n if pair_type == 'target':\n for target in self.trade_window_list:\n base_features_list.append(f\"\"\"((LAG({self.target_coin}_bids_cum_5000_weighted_avg, {target}) OVER (ORDER BY {self.target_coin}_trade_minute DESC) - {self.target_coin}_asks_cum_5000_weighted_avg) / {self.target_coin}_asks_cum_5000_weighted_avg * 100) AS futr_{target}_askbid_cum_5000_weighted_avg_perc_chg\"\"\")\n # experiment with predicting return starting at minute 1 instead of minute 0 to account for our scoring->trade delay.\n #base_features_list.append(f\"\"\"((LAG({self.target_coin}_bids_cum_5000_weighted_avg, {target}) OVER (ORDER BY {self.target_coin}_trade_minute DESC) - LAG({self.target_coin}_asks_cum_5000_weighted_avg, 1) OVER (ORDER BY {self.target_coin}_trade_minute DESC)) / LAG({self.target_coin}_asks_cum_5000_weighted_avg, 1) OVER (ORDER BY {self.target_coin}_trade_minute DESC) * 100) AS futr_{target}_askbid_cum_5000_weighted_avg_perc_chg\"\"\")\n final_col_list.append(f'futr_{target}_askbid_cum_5000_weighted_avg_perc_chg') \n target_col_list.append(f'futr_{target}_askbid_cum_5000_weighted_avg_perc_chg')\n\n # Coin level CTE \n feature_cte_list.append(f\"\"\"\n {pair_type}_{coin_pair}_features AS (\n SELECT {','.join(base_features_list)}\n FROM {pair_type}_{coin_pair}_candlesticks c \n INNER JOIN {pair_type}_{coin_pair}_beg_orderbook o_beg ON o_beg.coin_pair = c.coin_pair AND o_beg.trade_minute = c.trade_minute \n INNER JOIN {pair_type}_{coin_pair}_end_orderbook o_end ON o_end.coin_pair = c.coin_pair AND o_end.lag_trade_minute = c.trade_minute\n )\"\"\")\n\n # Interaction features for alt coins (base usdt)\n interaction_features = ''\n if pair_type == 'alt':\n interaction_features_list.append(f\"\"\"AVG(({self.target_coin}_bid_ask_average_price-{coin_pair}_bid_ask_average_price)/{self.target_coin}_bid_ask_average_price) OVER (PARTITION BY {self.target_coin}_coin_partition ORDER BY {self.target_coin}_trade_minute ASC ROWS 5 PRECEDING) \n - (({self.target_coin}_bid_ask_average_price-{coin_pair}_bid_ask_average_price)/{self.target_coin}_bid_ask_average_price) AS 
avg_5_{coin_pair}_bid_ask_average_price_interaction\"\"\")\n interaction_features_list.append(f\"\"\"AVG(({self.target_coin}_bid_ask_average_price-{coin_pair}_bid_ask_average_price)/{self.target_coin}_bid_ask_average_price) OVER (PARTITION BY {self.target_coin}_coin_partition ORDER BY {self.target_coin}_trade_minute ASC ROWS 10 PRECEDING) \n - (({self.target_coin}_bid_ask_average_price-{coin_pair}_bid_ask_average_price)/{self.target_coin}_bid_ask_average_price) AS avg_10_{coin_pair}_bid_ask_average_price_interaction\"\"\")\n interaction_features_list.append(f\"\"\"AVG(({self.target_coin}_bid_ask_average_price-{coin_pair}_bid_ask_average_price)/{self.target_coin}_bid_ask_average_price) OVER (PARTITION BY {self.target_coin}_coin_partition ORDER BY {self.target_coin}_trade_minute ASC ROWS 20 PRECEDING) \n - (({self.target_coin}_bid_ask_average_price-{coin_pair}_bid_ask_average_price)/{self.target_coin}_bid_ask_average_price) AS avg_20_{coin_pair}_bid_ask_average_price_interaction\"\"\")\n feature_col_list.extend([f'avg_5_{coin_pair}_bid_ask_average_price_interaction',f'avg_10_{coin_pair}_bid_ask_average_price_interaction',f'avg_20_{coin_pair}_bid_ask_average_price_interaction'])\n interaction_features = ','.join(interaction_features_list)\n interaction_features = ',' + interaction_features\n\n # Join conditions\n if pair_type == 'target':\n join_conditions_list.append(f\"\"\"{pair_type}_{coin_pair}_features\"\"\") \n else:\n join_conditions_list.append(f\"\"\"{pair_type}_{coin_pair}_features ON target_{self.target_coin}_features.{self.target_coin}_trade_minute = {pair_type}_{coin_pair}_features.{coin_pair}_trade_minute\"\"\")\n\n base_ctes = ','.join(base_ctes_list)\n feature_ctes = ','.join(feature_cte_list)\n feature_ctes = ',' + feature_ctes\n final_cols = ','.join(final_col_list)\n join_conditions = ' LEFT JOIN '.join(join_conditions_list)\n\n query_template = f\"\"\"WITH {base_ctes}\n {feature_ctes}\n SELECT {final_cols}\n {interaction_features}\n FROM {join_conditions}\n ORDER BY {self.target_coin}_trade_minute {'DESC' if operation == 'scoring' else 'ASC'}\n {'LIMIT 1' if operation == 'scoring' else ''}\"\"\" # LIMIT SCORING DATA - NOT ALL DATA IS RELEVANT TO CURRENT\n\n return query_template, feature_col_list, target_col_list", "def _make_query(self):\r\n raise NotImplementedError()", "def load_data(base_path, experiment_name, train_split_fraction, no_operators, no_sample_tuples, expansion_factor,\n dim_predicate_embedding, size_threshold):\n filename_prefix = path.join(base_path, \"data\", experiment_name)\n parser = Parser(no_operators, no_sample_tuples, dim_predicate_embedding)\n table_rows = pd.read_csv(filename_prefix + \".csv\")['tablerows']\n table_sizes_kb = pd.read_csv(filename_prefix + \".csv\")['tablesizekb']\n queries, num_colums, num_tables, tables_to_columns = parser.parse_file(filename_prefix + \".json\")\n\n # Shuffle for train-test-split -- queries based on the same plan can only end in train or test set\n random.shuffle(queries)\n\n # Postprocess Queries\n expanded_queries = []\n for query in queries:\n q_operator_node, q_runtime, q_index = query\n\n # Only consider tables with size over a certain threshold\n if table_sizes_kb[q_index] >= size_threshold:\n # Normalize runtime by table sizes otherwise it does not work\n query = (q_operator_node, q_runtime / table_rows[q_index], q_index)\n\n # Expand/vary queries\n for i in range(expansion_factor):\n query_copy = deepcopy(query)\n vary_query(query_copy[0], num_colums, tables_to_columns)\n 
expanded_queries.append(query_copy)\n\n split_point = int(len(expanded_queries) * train_split_fraction)\n return expanded_queries[:split_point], expanded_queries[split_point:], num_colums, num_tables", "def df_builder(selector: str, parameter: str, league_name: str) -> pd.DataFrame:\n assert isinstance(selector, str), 'Selector needs to be a string.'\n assert isinstance(parameter, str), 'parameter needs to be a string.'\n assert isinstance(league_name, str), 'league_name needs to be a string.'\n\n\n query = f'''SELECT club, {selector} AS {parameter}\n FROM team_stats \n WHERE league = '{league_name}'\n GROUP BY club\n ORDER BY {parameter} DESC;'''\n\n engine = create_engine('postgresql:///soccer_database')\n df = pd.read_sql_query(query, engine)\n\n return df", "def make_slicer_query(\n self,\n base_table: Table,\n joins: Sequence[Join] = (),\n dimensions: Sequence[Field] = (),\n metrics: Sequence[Field] = (),\n filters: Sequence[Filter] = (),\n orders: Sequence = (),\n ) -> Type[QueryBuilder]:\n query = self.query_cls.from_(base_table, immutable=False)\n elements = flatten([metrics, dimensions, filters])\n\n # Add joins\n join_tables_needed_for_query = find_required_tables_to_join(elements, base_table)\n\n for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):\n query = query.join(join.table, how=join.join_type).on(join.criterion)\n\n # Add dimensions\n for dimension in dimensions:\n dimension_term = self.transform_field_to_query(dimension, self.trunc_date)\n query = query.select(dimension_term)\n\n if dimension.groupable:\n query = query.groupby(dimension_term)\n\n # Add filters\n for fltr in filters:\n query = query.having(fltr.definition) if fltr.is_aggregate else query.where(fltr.definition)\n\n # Add metrics\n metric_terms = [self.transform_field_to_query(metric) for metric in metrics]\n if metric_terms:\n query = query.select(*metric_terms)\n\n # In the case that the orders are determined by a field that is not selected as a metric or dimension, then it needs\n # to be added to the query.\n select_aliases = {el.alias for el in query._selects}\n for (orderby_field, orientation) in orders:\n orderby_term = self.transform_field_to_query(orderby_field)\n query = query.orderby(orderby_term, order=orientation)\n\n if orderby_term.alias not in select_aliases:\n query = query.select(orderby_term)\n\n return query", "def _prepare_query(\n self,\n string: str,\n *,\n rank: bool = True,\n exact_match: bool = False,\n fuzziness: str = 'AUTO',\n prefix_length: int = 0,\n max_expansions: int = 50,\n transpositions: bool = True,\n ) -> Dict[str, Any]:\n if exact_match:\n query_body = {\n 'match': {\n 'term': string\n }\n }\n else:\n query_body = {\n 'fuzzy': {\n 'term': {\n 'value': string,\n 'fuzziness': fuzziness,\n 'prefix_length': prefix_length,\n 'max_expansions': max_expansions,\n 'transpositions': transpositions,\n }\n }\n }\n\n query = {'sort': [{'_score': 'desc'}]} if rank else {}\n query.update({'query': query_body})\n return query", "def __init__(self):\n BDLQuery.__init__(self)\n self.pandas_df = []", "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def extract_data_from_DB(query, dao_object, *query_params):\n\n local_query = None\n\n if(len(query_params) == 0):\n local_query = query\n else:\n local_query = query % query_params\n\n #print(local_query)\n\n # Extract data\n #output_df = 0\n output_df = 
pd.DataFrame(dao_object.get(local_query))\n column_names = dao_object.get_column_name()\n output_df.columns = column_names\n\n return output_df", "def make_query(self):", "def _generate_features(self, feature_extractors):\n results = [pd.DataFrame()]\n n_ext = len(feature_extractors)\n\n for i, extractor in enumerate(feature_extractors):\n log.info(\"generating: '%s' (%d/%d)\", extractor.name, i + 1, n_ext)\n cached_extractor = self._cache[extractor.name]\n if extractor.same(cached_extractor):\n log.info('pulling from cache')\n extractor = cached_extractor\n else:\n log.info('running...')\n extractor.extract()\n results.append(extractor.result)\n if self.cache_path:\n self._cache[extractor.name] = extractor\n\n if self.cache_path:\n with open(self.cache_path, 'wb') as f:\n pickle.dump(self._cache, f)\n\n return pd.concat(results, axis=1)", "def _extractor_factory(schema_builder):\n return ExtractorBuilder(schema_builder)", "def input_fn():\n # It's important to build all the tensors together in one DataFrame.\n # If we did df.select() for both key sets and then build those, the two\n # resulting DataFrames would be shuffled independently.\n tensors = limited_dataframe.build(**kwargs)\n\n base_input_features = {key: tensors[key] for key in base_input_keys}\n labels = {key: tensors[key] for key in label_keys}\n\n # TODO(soergel): Remove this special case when b/30367437 is fixed.\n if len(labels) == 1:\n labels = list(labels.values())[0]\n\n return base_input_features, labels", "def add_features(df):\n \n assert df.columns.str.contains(\"query|value|keyword|ranking|timestamp|geo\").all(), \"Add features failed. \\\n Missing one of [query, value, keyword, ranking, timestamp, geo]\"\n \n # feature engineering: totals and normalize\n grouped = df.groupby(['ranking']).value # group values by ranking\n df['value_total'] = grouped.transform('sum') # total sum \n df['value_normalized'] = (df.value-grouped.transform('min'))/(grouped.transform('max')-grouped.transform('min')) # normalize \n df['value_normalized_total'] = df.groupby(['ranking']).value_normalized.transform('sum') # total sum of normalized values \n df['date'] = pd.to_datetime(df.query_timestamp).dtd\n \n return df", "def gen_query(self, criteria, target_id, for_stats=False):\n query = self.db.session.query(models.Url).filter_by(target_id=target_id)\n # Check if criteria is url search\n if criteria.get('search', None):\n if criteria.get('url', None):\n if isinstance(criteria.get('url'), list):\n criteria['url'] = criteria['url'][0]\n query = query.filter(models.Url.url.like('%%%s%%' % criteria['url']))\n else: # If not search\n if criteria.get('url', None):\n if isinstance(criteria.get('url'), str):\n query = query.filter_by(url=criteria['url'])\n if isinstance(criteria.get('url'), list):\n query = query.filter(models.Url.url.in_(criteria['url']))\n # For the following section doesn't matter if filter/search because\n # it doesn't make sense to search in a boolean column :P\n if criteria.get('visited', None):\n if isinstance(criteria.get('visited'), list):\n criteria['visited'] = criteria['visited'][0]\n query = query.filter_by(visited=self.config.ConvertStrToBool(criteria['visited']))\n if criteria.get('scope', None):\n if isinstance(criteria.get('scope'), list):\n criteria['scope'] = criteria['scope'][0]\n query = query.filter_by(scope=self.config.ConvertStrToBool(criteria['scope']))\n if not for_stats: # Query for stats can't have limit and offset\n try:\n if criteria.get('offset', None):\n if isinstance(criteria.get('offset'), 
list):\n criteria['offset'] = criteria['offset'][0]\n query = query.offset(int(criteria['offset']))\n if criteria.get('limit', None):\n if isinstance(criteria.get('limit'), list):\n criteria['limit'] = criteria['limit'][0]\n query = query.limit(int(criteria['limit']))\n except ValueError:\n raise InvalidParameterType(\"Invalid parameter type for transaction db\")\n return query", "def query(_from, _select, _geomselect=None, _where=None, _groupby=None, _limit=None):\n # INSTEAD MAKE INTO CLASS\n # WITH .fields attr\n # AND .__iter__()\n # AND .get_vectordata()\n # AND MAKE EACH YIELDED ROW A VECTOR FEATURE CLASS\n # THIS WAY ALLOWING CHAINED QUERIES\n\n # parse args\n iterables = _from\n columnfuncs = _select\n geomfunc = _geomselect\n condition = _where\n key = _groupby\n n = _limit\n \n # first yield header as list of column names\n colnames = [each[0] for each in columnfuncs]\n yield colnames\n\n # make an iterable that yields every combinaion of all input iterables' items\n if len(iterables) == 1:\n iterable = iterables[0]\n else:\n iterable = itertools.product(*iterables)\n\n # iterate and add\n if key:\n groups = groupby(iterable, key)\n\n # limit\n if n:\n groups = limit(groups, n)\n \n for items in groups:\n # filter\n if condition:\n items = where(items, condition)\n \n # aggregate\n # NOTE: columnfuncs and geomfunc must expect an iterable as input and return a single row,geom pair\n item = aggreg(items, columnfuncs, geomfunc)\n yield item\n \n else:\n # filter\n if condition:\n iterable = where(iterable, condition)\n\n # limit\n if n:\n iterable = limit(iterable, n)\n\n # select\n for item in select(iterable, columnfuncs, geomfunc):\n yield item", "def _get_combined_df(self, **kwargs):\n\n columns = kwargs.pop('columns', ['default'])\n state = kwargs.pop('state', '')\n user_query = kwargs.pop('query_str', '')\n hostname = kwargs.pop('hostname', [])\n\n cols = self.schema.get_display_fields(columns)\n self._add_active_to_fields(kwargs.get('view', self.iobj.view), cols,\n None)\n\n user_query_cols = self._get_user_query_cols(user_query)\n\n ifschema = SchemaForTable('ospfIf', schema=self.all_schemas)\n nbrschema = SchemaForTable('ospfNbr', schema=self.all_schemas)\n\n if columns not in [['default'], ['*']]:\n ifkeys = ifschema.key_fields()\n nbrkeys = nbrschema.key_fields()\n if_flds = ifschema.fields\n nbr_flds = nbrschema.fields\n\n ifcols = ifkeys\n nbrcols = nbrkeys\n for fld in columns:\n if fld in if_flds and fld not in ifcols:\n ifcols.append(fld)\n elif fld in nbr_flds and fld not in nbrcols:\n nbrcols.append(fld)\n if 'state' not in nbrcols:\n nbrcols.append('state')\n ifcols += [x for x in ['area', 'state', 'passive']\n if x not in ifcols]\n else:\n ifcols = ifschema.get_display_fields(columns)\n nbrcols = nbrschema.get_display_fields(columns)\n\n ifcols += [x for x in user_query_cols if (x in ifschema.fields\n and x not in ifcols)]\n self._add_active_to_fields(kwargs.get('view', 'latest'), ifcols,\n None)\n nbrcols += [x for x in user_query_cols if (x in nbrschema.fields\n and x not in nbrcols)]\n self._add_active_to_fields(kwargs.get('view', 'latest'), nbrcols,\n None)\n\n if 'timestamp' not in ifcols:\n ifcols.append('timestamp')\n\n if 'timestamp' not in nbrcols:\n nbrcols.append('timestamp')\n\n state_query_dict = {\n 'full': '(adjState == \"full\" or adjState == \"passive\")',\n 'passive': '(adjState == \"passive\")',\n 'other': '(adjState != \"full\" and adjState != \"passive\")',\n '!full': '(adjState != \"full\")',\n '!passive': '(adjState != \"passive\")',\n 
'!other': '(adjState == \"full\" or adjState == \"passive\")',\n }\n\n if state:\n query_str = state_query_dict.get(state, '')\n cond_prefix = ' and '\n else:\n query_str = ''\n cond_prefix = ''\n\n host_query_str = build_query_str([], ifschema, ignore_regex=False,\n hostname=hostname)\n if host_query_str:\n query_str += f'{cond_prefix}{host_query_str}'\n\n df = self._get_table_sqobj('ospfIf') \\\n .get(columns=ifcols, **kwargs)\n nbr_df = self._get_table_sqobj('ospfNbr') \\\n .get(columns=nbrcols, **kwargs)\n if nbr_df.empty:\n return df\n\n merge_cols = [x for x in ['namespace', 'hostname', 'ifname']\n if x in nbr_df.columns]\n # Merge the two tables\n df = df.merge(nbr_df, on=merge_cols, how='left') \\\n .fillna({'peerIP': '-', 'numChanges': 0,\n 'lastChangeTime': 0}) \\\n .fillna('')\n\n # This is because some NOS have the ipAddress in nbr table and some in\n # interface table. Nbr table wins over interface table if present\n if 'ipAddress_y' in df:\n df['ipAddress'] = np.where(\n df['ipAddress_x'] == \"\",\n df['ipAddress_y'], df['ipAddress_x'])\n\n df = df.rename(columns={\n 'instance_x': 'instance', 'areaStub_x': 'areaStub',\n 'area_x': 'area', 'vrf_x': 'vrf',\n 'state_x': 'ifState', 'state_y': 'adjState',\n 'active_x': 'active', 'timestamp_x': 'timestamp'})\n\n df = df.drop(columns=list(df.filter(regex='_y$|_x$')),\n errors='ignore') \\\n .fillna({'peerIP': '-', 'numChanges': 0,\n 'lastChangeTime': 0})\n\n if df.empty:\n return df\n\n if 'state' in df.columns:\n # Need this logic if the user specfies only one of adjState\n # or ifState in the columns field. The above renaming of state_x\n # and state_y will not work in that case, and this is what we\n # have to do.\n if 'ifState' in cols:\n df = df.rename(columns={'state': 'ifState'})\n else:\n df = df.rename(columns={'state': 'adjState'})\n if 'lastChangeTime' in df.columns:\n df['lastChangeTime'] = np.where(df.lastChangeTime == '-',\n 0, df.lastChangeTime)\n # Fill the adjState column with passive if passive\n if 'passive' in df.columns and 'adjState' in df.columns:\n df.loc[df['adjState'] == '', 'adjState'] = df['passive']\n df.loc[df['adjState'].eq(True), 'adjState'] = 'passive'\n df.loc[df['adjState'].eq(False), 'adjState'] = 'fail'\n df.loc[df['adjState'] == 'passive', 'peerIP'] = ''\n df.loc[df['adjState'] == 'passive', 'peerRouterId'] = ''\n\n df.drop(columns=['passive'], inplace=True)\n\n final_df = df\n if 'peerHostname' in cols or 'peerIfname' in cols:\n final_df = self._get_peernames(final_df, cols, hostname=hostname,\n **kwargs)\n if query_str:\n final_df = final_df.query(query_str).reset_index(drop=True)\n\n if user_query and not final_df.empty:\n final_df = self._handle_user_query_str(final_df, user_query)\n # Move the timestamp column to the end\n\n return final_df.reset_index(drop=True)[cols]", "def engineer_features(\n df_org: pd.DataFrame,\n start_features: list | None = None,\n units: dict | None = None,\n max_steps: int = 3,\n transformations: list | tuple = (\"1/\", \"exp\", \"log\", \"abs\", \"sqrt\", \"^2\", \"^3\"),\n verbose: int = 0,\n) -> tuple[pd.DataFrame, dict]:\n # initialize the feature pool with columns from the dataframe\n if not start_features:\n start_features = df_org.columns\n else:\n for c in start_features:\n if c not in df_org.columns:\n raise ValueError(f\"[feateng] start feature {c} not in df_org.columns\")\n feature_pool = {c: sympy.symbols(colnames2symbols(c, i), real=True) for i, c in enumerate(start_features)} # type: ignore\n if max_steps < 1:\n if verbose > 0:\n 
logging.warning(\"[feateng] no features generated for max_steps < 1.\")\n return df_org, feature_pool\n # get a copy of the dataframe - this is where all the features will be added\n df = pd.DataFrame(df_org.copy(), dtype=np.float32)\n\n compiled_func_transformations = None\n compiled_func_transforms_cond = None\n compiled_func_combinations = None\n\n def compile_func_transform(name: str, ft: Callable, plus_1: bool = False):\n def _abs(x):\n return np.abs(x)\n\n # create temporary variable expression and apply it to precomputed feature\n t = sympy.symbols(\"t\")\n expr_temp = ft(t + 1) if plus_1 else ft(t)\n fn = _abs if name == \"abs\" else lambdify(t, expr_temp)\n return nb.njit(fn)\n\n def apply_transformations(features_list: list) -> tuple[list, set]:\n # feature transformations\n func_transform = {\n \"exp\": lambda x: sympy.exp(x),\n \"exp-\": lambda x: sympy.exp(-x),\n \"log\": lambda x: sympy.log(x),\n \"abs\": lambda x: sympy.Abs(x),\n \"sqrt\": lambda x: sympy.sqrt(x),\n \"sin\": lambda x: sympy.sin(x),\n \"cos\": lambda x: sympy.cos(x),\n \"2^\": lambda x: 2**x,\n \"^2\": lambda x: x**2,\n \"^3\": lambda x: x**3,\n \"1+\": lambda x: 1 + x,\n \"1-\": lambda x: 1 - x,\n \"1/\": lambda x: 1 / x,\n }\n func_transform_units = {\n \"exp\": lambda x: np.exp(x),\n \"exp-\": lambda x: np.exp(-x),\n \"log\": lambda x: np.log(x),\n \"abs\": lambda x: np.abs(x),\n \"sqrt\": lambda x: np.sqrt(x),\n \"sin\": lambda x: np.sin(x),\n \"cos\": lambda x: np.cos(x),\n \"2^\": lambda x: np.exp(x),\n \"^2\": lambda x: x**2,\n \"^3\": lambda x: x**3,\n \"1+\": lambda x: 1 + x,\n \"1-\": lambda x: 1 - x,\n \"1/\": lambda x: 1 / x,\n }\n # conditions on the original features that have to be met to apply the transformation\n func_transform_cond = {\n \"exp\": lambda x: np.all(x < 10),\n \"exp-\": lambda x: np.all(-x < 10),\n \"log\": lambda x: np.all(x >= 0),\n \"abs\": lambda x: np.any(x < 0),\n \"sqrt\": lambda x: np.all(x >= 0),\n \"sin\": lambda x: True,\n \"cos\": lambda x: True,\n \"2^\": lambda x: np.all(x < 50),\n \"^2\": lambda x: np.all(np.abs(x) < 1000000),\n \"^3\": lambda x: np.all(np.abs(x) < 10000),\n \"1+\": lambda x: True,\n \"1-\": lambda x: True,\n \"1/\": lambda x: np.all(x != 0),\n }\n # apply transformations to the features in the given features list\n # modifies global variables df and feature_pool!\n nonlocal df, feature_pool, units\n nonlocal compiled_func_transformations, compiled_func_transforms_cond\n\n if compiled_func_transformations is None:\n compiled_func_transformations = {k: compile_func_transform(k, v) for k, v in func_transform.items()}\n compiled_func_transformations[\"log_plus_1\"] = compile_func_transform(\"log\", func_transform[\"log\"], plus_1=True)\n\n compiled_func_transforms_cond = {x[0]: nb.njit(x[1]) for x in func_transform_cond.items()}\n\n # returns a list of new features that were generated\n new_features: list[str] = []\n uncorr_features = set()\n # store all new features in a preallocated numpy array before adding it to the dataframe\n feat_array = np.zeros((df.shape[0], len(features_list) * len(transformations)), dtype=np.float32)\n cat_features = {feat for feat in features_list if len(df[feat].unique()) <= 2}\n func_transform_cond_cache = {} # Cache for compiled_func_transforms_cond checks\n for i, feat in enumerate(features_list):\n if verbose and not i % 100:\n print(f\"[feateng] {i:15}/{len(features_list):15} features transformed\", end=\"\\r\")\n for ft in transformations:\n # (don't compute transformations on categorical features)\n if feat in 
cat_features:\n continue\n # check if transformation is valid for particular feature (i.e. given actual numerical values)\n cache_key = (ft, feat)\n if cache_key not in func_transform_cond_cache:\n func_transform_cond_cache[cache_key] = compiled_func_transforms_cond[ft](df[feat].to_numpy())\n if func_transform_cond_cache[cache_key]:\n # get the expression (based on the primary features)\n expr = func_transform[ft](feature_pool[feat])\n expr_name = str(expr)\n # we're simplifying expressions, so we might already have that one\n if expr_name not in feature_pool:\n # if we're given units, check if the operation is legal\n if units:\n try:\n units[expr_name] = func_transform_units[ft](units[feat])\n units[expr_name].__dict__[\"_magnitude\"] = 1.0\n except (pint.DimensionalityError, pint.OffsetUnitCalculusError):\n continue\n feature_pool[expr_name] = expr\n if expr == \"log\" and np.any(df[feat] < 1):\n f = compiled_func_transformations[\"log_plus_1\"]\n else:\n f = compiled_func_transformations[ft]\n new_feat = np.array(f(df[feat].to_numpy()), dtype=np.float32)\n # near 0 variance test - sometimes all that's left is \"e\"\n if np.isfinite(new_feat).all() and np.var(new_feat) > 1e-10:\n corr = abs(np.corrcoef(new_feat, df[feat])[0, 1])\n if corr < 1.0:\n feat_array[:, len(new_features)] = new_feat\n new_features.append(expr_name)\n # correlation test: don't include features that are basically the same as the original features\n # but we only filter them out at the end, since they still might help in other steps!\n if corr < 0.95:\n uncorr_features.add(expr_name)\n if verbose > 0:\n logging.info(\n f\"[feateng] Generated {len(new_features)} transformed features from {len(features_list)} original features - done.\",\n )\n df = df.join(pd.DataFrame(feat_array[:, : len(new_features)], columns=new_features, index=df.index, dtype=np.float32))\n return new_features, uncorr_features\n\n def compile_func_combinations(func_combinations: dict) -> dict:\n d = {}\n for fc in func_combinations:\n s, t = sympy.symbols(\"s t\")\n expr_temp = func_combinations[fc](s, t)\n fn = lambdify((s, t), expr_temp)\n vect = nb.vectorize([\"float32(float32, float32)\"], nopython=True)\n d[fc] = vect(fn)\n return d\n\n def get_feature_combinations(feature_tuples: list) -> tuple[list, set]:\n # new features as combinations of two other features\n func_combinations = {\n \"x+y\": lambda x, y: x + y,\n \"x*y\": lambda x, y: x * y,\n \"x-y\": lambda x, y: x - y,\n \"y-x\": lambda x, y: y - x,\n }\n # get all feature combinations for the given feature tuples\n # modifies global variables df and feature_pool!\n nonlocal df, feature_pool, units, compiled_func_combinations\n\n if compiled_func_combinations is None:\n compiled_func_combinations = compile_func_combinations(func_combinations)\n\n # only compute all combinations if there are more transformations applied afterwards\n # additions at the highest level are sorted out later anyways\n combinations = [\"x*y\"] if steps == max_steps else list(func_combinations.keys())\n # returns a list of new features that were generated\n new_features: list[str] = []\n uncorr_features = set()\n # store all new features in a preallocated numpy array before adding it to the dataframe\n feat_array = np.zeros((df.shape[0], len(feature_tuples) * len(combinations)), dtype=np.float32)\n for i, (feat1, feat2) in enumerate(feature_tuples):\n if verbose and not i % 100:\n print(f\"[feateng] {i:15}/{len(feature_tuples):15} feature tuples combined\", end=\"\\r\")\n for fc in combinations:\n expr = 
func_combinations[fc](feature_pool[feat1], feature_pool[feat2])\n expr_name = str(expr)\n if expr_name not in feature_pool:\n # if we're given units, check if the operation is legal\n if units:\n try:\n units[expr_name] = func_combinations[fc](units[feat1], units[feat2])\n units[expr_name].__dict__[\"_magnitude\"] = 1.0\n except (pint.DimensionalityError, pint.OffsetUnitCalculusError):\n continue\n feature_pool[expr_name] = expr\n f = compiled_func_combinations[fc]\n new_feat = np.array(f(df[feat1].to_numpy(), df[feat2].to_numpy()), dtype=np.float32)\n # near 0 variance test - sometimes all that's left is \"e\"\n if np.isfinite(new_feat).all() and np.var(new_feat) > 1e-10:\n corr = max(abs(np.corrcoef(new_feat, df[feat1])[0, 1]), abs(np.corrcoef(new_feat, df[feat2])[0, 1]))\n if corr < 1.0:\n feat_array[:, len(new_features)] = new_feat\n new_features.append(expr_name)\n # correlation test: don't include features that are basically the same as the original features\n # but we only filter them out at the end, since they still might help in other steps!\n if corr < 0.95:\n uncorr_features.add(expr_name)\n if verbose > 0:\n logging.info(\n f\"[feateng] Generated {len(new_features)} feature combinations from {len(feature_tuples)} original feature tuples - done.\",\n )\n df = df.join(pd.DataFrame(feat_array[:, : len(new_features)], columns=new_features, index=df.index, dtype=np.float32))\n return new_features, uncorr_features\n\n # get transformations of initial features\n steps = 1\n if verbose > 0:\n logging.info(\"[feateng] Step 1: transformation of original features\")\n original_features = list(feature_pool.keys())\n uncorr_features = set(feature_pool.keys())\n temp_new, temp_uncorr = apply_transformations(original_features)\n original_features.extend(temp_new)\n uncorr_features.update(temp_uncorr)\n steps += 1\n # get combinations of first feature set\n if steps <= max_steps:\n if verbose > 0:\n logging.info(\"[feateng] Step 2: first combination of features\")\n new_features, temp_uncorr = get_feature_combinations(list(combinations(original_features, 2)))\n uncorr_features.update(temp_uncorr)\n steps += 1\n while steps <= max_steps:\n # apply transformations on these new features\n if verbose > 0:\n logging.info(f\"[feateng] Step {steps}: transformation of new features\")\n temp_new, temp_uncorr = apply_transformations(new_features)\n new_features.extend(temp_new)\n uncorr_features.update(temp_uncorr)\n steps += 1\n # get combinations of old and new features\n if steps <= max_steps:\n if verbose > 0:\n logging.info(f\"[feateng] Step {steps}: combining old and new features\")\n new_new_features, temp_uncorr = get_feature_combinations(list(product(original_features, new_features)))\n uncorr_features.update(temp_uncorr)\n steps += 1\n # and combinations of new features within themselves\n if steps <= max_steps:\n if verbose > 0:\n logging.info(f\"[feateng] Step {steps}: combining new features\")\n temp_new, temp_uncorr = get_feature_combinations(list(combinations(new_features, 2)))\n new_new_features.extend(temp_new)\n uncorr_features.update(temp_uncorr)\n steps += 1\n # update old and new features and repeat\n original_features.extend(new_features)\n new_features = new_new_features\n\n # sort out all features that are just additions on the highest level or correlated with more basic features\n if verbose > 0:\n logging.info(f\"[feateng] Generated altogether {len(feature_pool) - len(start_features)} new features in {max_steps} steps\") # type: ignore\n logging.info(\"[feateng] Removing 
correlated features, as well as additions at the highest level\")\n feature_pool = {\n c: feature_pool[c] for c in feature_pool if c in uncorr_features and feature_pool[c].func != sympy.core.add.Add\n }\n cols = [\n c for c in list(df.columns) if c in feature_pool and c not in df_org.columns\n ] # categorical cols not in feature_pool\n if cols:\n # check for correlated features again; this time with the start features\n corrs = dict(\n zip(\n cols,\n np.max(\n np.abs(\n np.dot(StandardScaler().fit_transform(df[cols]).T, StandardScaler().fit_transform(df_org))\n / df_org.shape[0],\n ),\n axis=1,\n ),\n ),\n )\n cols = [c for c in cols if corrs[c] < 0.9]\n cols = list(df_org.columns) + cols\n if verbose > 0:\n logging.info(f\"[feateng] Generated a total of {len(feature_pool) - len(start_features)} additional features\") # type: ignore\n return df[cols], feature_pool", "def __init__(self, data, aggregate=np.mean, base=None, time_slices=None):\n\n # Check that the arguments have the right types\n assert isinstance(data, pd.DataFrame)\n\n self.data = data\n self.aggregate = aggregate\n self.time_slices = time_slices\n # Not sure that this is the right way to do it - I feel like we're outsmarting pandas\n # pandas supports multiple keys in a dataframe, whereas this only allows one.\n # Should we replace FeatureCollection with something like that?\n self.name = data.keys()[0]\n\n if base is not None:\n assert isinstance(base, Feature)\n\n self.base = base", "def get_merged_df(base_path, feature_methods):\n features = [pd.read_csv(get_feature_path(method, base_path)) for method in feature_methods]\n return merge_dfs(features)", "def _build_estimator(config, hidden_units=None, warm_start_from=None):\n real_valued_columns = [\n tf.feature_column.numeric_column(key, shape=())\n for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n ]\n categorical_columns = [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)\n for key in _transformed_names(_VOCAB_FEATURE_KEYS)\n ]\n categorical_columns += [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)\n for key in _transformed_names(_BUCKET_FEATURE_KEYS)\n ]\n categorical_columns += [\n tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension\n key,\n num_buckets=num_buckets,\n default_value=0) for key, num_buckets in zip(\n _transformed_names(_CATEGORICAL_FEATURE_KEYS),\n _MAX_CATEGORICAL_FEATURE_VALUES)\n ]\n return tf.estimator.DNNLinearCombinedClassifier(\n config=config,\n linear_feature_columns=categorical_columns,\n dnn_feature_columns=real_valued_columns,\n dnn_hidden_units=hidden_units or [100, 70, 50, 25],\n warm_start_from=warm_start_from)", "def establish_query(query_func, input_size):\n return lambda input_data: query_func(reshape_input(input_data, input_size))", "def build_dataset_search(self, dataset_dir, is_training, is_shuffle):\n pass", "def get(self, **kwargs) -> pd.DataFrame:\n\n addr = kwargs.pop(\"address\", [])\n prefix = kwargs.pop(\"prefix\", [])\n columns = kwargs.pop(\"columns\", [])\n ipvers = kwargs.pop(\"ipvers\", \"\")\n user_query = kwargs.pop(\"query_str\", \"\")\n\n if user_query:\n if user_query.startswith('\"') and user_query.endswith('\"'):\n user_query = user_query[1:-1]\n\n vrf = kwargs.pop(\"vrf\", \"\")\n\n addnl_fields = []\n fields = self.schema.get_display_fields(columns)\n self._add_active_to_fields(kwargs.get('view', self.iobj.view),\n fields, 
addnl_fields)\n\n if prefix:\n addr_types = self.addr_type(prefix)\n else:\n addr_types = self.addr_type(addr)\n\n # Always include ip or mac addresses in the dataframe\n # if there is a filter on them\n\n for x in ['ipAddressList', 'ip6AddressList', 'macaddr']:\n if x not in fields:\n addnl_fields.append(x)\n\n user_query_cols = self._get_user_query_cols(user_query)\n addnl_fields += [x for x in user_query_cols if x not in addnl_fields]\n\n df = super().get(addnl_fields=addnl_fields, columns=fields,\n **kwargs)\n\n if df.empty:\n return df\n\n if 'master' in df.columns:\n df = df.rename({'master': 'vrf'}, axis=1) \\\n .replace({'vrf': {'': 'default'}})\n df.loc[(df.vrf == 'bridge') |\n ((df.ipAddressList.str.len() == 0)\n & (df.ip6AddressList.str.len() == 0)),\n 'vrf'] = ''\n\n query_str = build_query_str([], self.schema, vrf=vrf)\n\n addrcols = []\n if 4 in addr_types or ipvers in [\"v4\", \"\"]:\n # df = df.explode('ipAddressList').fillna({'ipAddressList': ''})\n addrcols.append('ipAddressList')\n\n if 6 in addr_types or ipvers in [\"v6\", \"\"]:\n # df = df.explode('ip6AddressList').fillna({'ip6AddressList': ''})\n addrcols.append('ip6AddressList')\n\n if ('ipAddress' in columns or (columns == ['*'])) and not ipvers:\n ndf = pd.DataFrame(df[addrcols].agg(\n self._merge_address_cols, axis=1),\n columns=['ipAddress'])\n df = pd.concat([df, ndf], axis=1)\n\n v4addr = []\n v6addr = []\n filter_prefix = ''\n\n # Address and prefix filtering are mutual exclusive\n if addr:\n macaddr = []\n for i, a in enumerate(addr):\n if addr_types[i] == 0:\n # convert the macaddr format to internal format\n a = convert_macaddr_format_to_colon(a)\n macaddr.append(a)\n elif addr_types[i] == 4:\n if '/' not in a:\n a += '/'\n v4addr.append(a)\n elif addr_types[i] == 6:\n if '/' not in a:\n a += '/'\n v6addr.append(a)\n\n # IMPORTANT: Don't mess with this order of query.\n # Some bug in pandas prevents it from working if\n # macaddr isn't first and your query\n # contains both a macaddr and an IP address.\n dfmac = dfv4 = dfv6 = pd.DataFrame()\n\n if macaddr:\n dfmac = df[df.macaddr.isin(macaddr)]\n\n if v4addr:\n dfv4 = df[df.ipAddressList.apply(\n lambda x, addrs: any(a.startswith(tuple(addrs))\n for a in x), args=(v4addr,))]\n if v6addr:\n dfv6 = df[df.ip6AddressList.apply(\n lambda x, addrs: any(a.startswith(tuple(addrs))\n for a in x), args=(v6addr,))]\n if v4addr or v6addr or macaddr:\n df = pd.concat([dfv4, dfv6, dfmac])\n elif prefix:\n for i, a in enumerate(prefix):\n if addr_types[i] == 4:\n v4addr.append(a)\n elif addr_types[i] == 6:\n v6addr.append(a)\n\n if v4addr:\n for a in v4addr:\n query_str += (f'{filter_prefix} '\n f'@self._is_in_subnet(ipAddressList,\"{a}\")')\n filter_prefix = 'or'\n if v6addr:\n for a in v6addr:\n query_str += (f'{filter_prefix} '\n f'@self._is_in_subnet(ip6AddressList,\"{a}\")')\n filter_prefix = 'or'\n\n if not query_str:\n if ipvers == \"v4\":\n query_str = 'ipAddressList.str.len() != 0'\n elif ipvers == \"v6\":\n query_str = 'ip6AddressList.str.len() != 0'\n elif ipvers == \"l2\":\n query_str = 'macaddr.str.len() != 0'\n\n if query_str:\n df = df.query(query_str)\n\n df = self._handle_user_query_str(df, user_query)\n return df.reset_index(drop=True)[fields]", "def GetDataFrame(self, q_string, var_tup=None):\n def map_to_dict( results, field_names):\n res_dict = {}\n for fn in field_names:\n res_dict[fn] = []\n for res in results:\n for fn, f in zip(field_names, res):\n res_dict[fn].append(f)\n return res_dict\n def map_to_df( results, field_names):\n return 
pandas.DataFrame.from_dict(map_to_dict( results, field_names ))\n cursor = self.GetCursor()\n l_logger.debug(\"Query: %s, %r\" % (q_string,var_tup))\n cursor.execute(q_string,var_tup)\n results = cursor.fetchall()\n field_names = [i[0] for i in cursor.description]\n if len(results) == 0:\n return None\n else:\n return map_to_df( results, field_names )", "def build_pseudo_ref(self):\n logging.info('Building pseudo finetune dataset')\n logging.info(self.configs[NEWSTEST])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[NEWSTEST],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec()\n eval_data = self.default_eval_builder(builder, shard_spec)\n\n new_data = pickle.load(tf.io.gfile.GFile(self.pseudo_path, 'rb'))\n # Create tensorflow dataset\n tf_pre_dataset = {'inputs': [], 'targets': []}\n for data in new_data:\n inp = data[-2]\n targ = data[-1] # [1:] # Targets have dummy first variable\n tf_pre_dataset['inputs'].append(inp)\n tf_pre_dataset['targets'].append(targ)\n\n tf_dataset = tf.data.Dataset.from_tensor_slices(tf_pre_dataset)\n return tf_dataset, eval_data", "def build_estimator(config, embedding_size=8, hidden_units=None):\n (time, v1, v2, v3, v4,\n v5, v6, v7, v8, v9, \n v10, v11, v12, v13, v14, \n v15,v16,v17,v18,v19,v20,v21,\n v22,v23,v24,v25,v26,v27,v28, amount) = INPUT_COLUMNS\n \"\"\"Build an estimator.\"\"\"\n \n # Reused Transformations.\n # Continuous columns can be converted to categorical via bucketization\n # We use the (bucketized) amount column in the Wide part\n amount_buckets = tf.feature_column.bucketized_column(amount, boundaries=[4,8,12,15,35,75,100, 200, 300, 1000])\n\n # Wide columns and deep columns.\n wide_columns = [amount_buckets]\n\n # All the other CCF features will be used in the deep part\n deep_columns = [\n time, v1, v2, v3, v4,\n v5, v6, v7, v8, v9, \n v10, v11, v12, v13, v14, \n v15,v16,v17,v18,v19,v20,v21,\n v22,v23,v24,v25,v26,v27,v28\n ]\n \n # We hardcode here the models in order to avoid the exponential decaying model which is already implemented\n hidden_units = [20,15]\n\n # We can try either Wide and Deep models or Deep Neural Networks (DNN)\n #\"\"\"\n return tf.contrib.learn.DNNLinearCombinedClassifier(\n config=config,\n linear_feature_columns=wide_columns,\n dnn_feature_columns=deep_columns,\n dnn_hidden_units=hidden_units or [100, 70, 50, 25],\n dnn_optimizer=tf.train.AdamOptimizer(),\n fix_global_step_increment_bug=True\n )\n\n \"\"\"\n deep_columns = deep_columns + [amount]\n return tf.contrib.learn.DNNClassifier(\n config=config,\n feature_columns=deep_columns,\n hidden_units=hidden_units or [100, 70, 50, 25],\n optimizer=tf.train.AdamOptimizer()\n #optimizer=tf.train.ProximalAdagradOptimizer(\n #learning_rate=0.1,\n #l2_regularization_strength=0.001\n\t #)\n )\n #\"\"\"", "def get(dataset_name: str, split: Union[Tuple[str, float], str, tfds.Split],\n **hyperparameters: Any) -> BaseDataset:\n hyperparameters_py = {\n k: (v.numpy().tolist() if isinstance(v, tf.Tensor) else v)\n for k, v in hyperparameters.items()\n }\n logging.info('Building dataset %s with additional kwargs:\\n%s', dataset_name,\n json.dumps(hyperparameters_py, indent=2, sort_keys=True))\n if dataset_name not in DATASETS:\n raise ValueError('Unrecognized dataset name: {!r}'.format(dataset_name))\n\n dataset_class = DATASETS[dataset_name]\n return dataset_class(split=split, **hyperparameters)", "def make_dataset(self, df, **kwargs):\n\t\treturn df" ]
[ "0.5952889", "0.5775487", "0.55588496", "0.529119", "0.5259821", "0.5202929", "0.51925635", "0.5178014", "0.5163325", "0.5140859", "0.5063282", "0.50391376", "0.5026559", "0.49773774", "0.49665532", "0.49620157", "0.49497977", "0.49446902", "0.49109635", "0.4886161", "0.48770732", "0.4871768", "0.48663807", "0.48501498", "0.48491573", "0.48023784", "0.479518", "0.47924542", "0.47824517", "0.4779038" ]
0.6853019
0
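A minimal sketch, not part of the dataset records, of the correlation filter applied in the feateng snippet above: candidate features whose maximum absolute Pearson correlation with any original column reaches 0.9 are dropped. The function name drop_correlated and the threshold default are assumptions for illustration.

import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler

def drop_correlated(df_new: pd.DataFrame, df_org: pd.DataFrame, threshold: float = 0.9) -> list:
    """Return candidate columns whose max |corr| with df_org stays below threshold."""
    z_new = StandardScaler().fit_transform(df_new)  # shape: (n_samples, n_new)
    z_org = StandardScaler().fit_transform(df_org)  # shape: (n_samples, n_org)
    # For standardized columns, (z_new.T @ z_org) / n_samples is the Pearson correlation matrix.
    corr = np.abs(z_new.T @ z_org) / len(df_org)
    max_corr = corr.max(axis=1)
    return [c for c, m in zip(df_new.columns, max_corr) if m < threshold]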
Parse a list of MAVEHGVS strings into Variant objects or error messages.
def parse_variant_strings( variants: Iterable[str], targetseq: Optional[str] = None, expected_prefix: Optional[str] = None, ) -> Tuple[List[Optional[Variant]], List[Optional[str]]]: if expected_prefix is not None and expected_prefix not in list("cgmnopr"): raise ValueError("invalid expected prefix") valid = list() invalid = list() for s in variants: try: v = Variant(s, targetseq=targetseq) except MaveHgvsParseError as error: valid.append(None) invalid.append(str(error)) else: if expected_prefix is not None and v.prefix != expected_prefix: valid.append(None) invalid.append("unexpected variant prefix") else: valid.append(v) invalid.append(None) return valid, invalid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_values(self):\n self.assertEqual(\"test\", grammar._VALUE.parseString(\"test\")[0])\n self.assertEqual(\"test(*)\", grammar._VALUE.parseString(\"test(*)\")[0])\n self.assertEqual(\"test(123)\", grammar._VALUE.parseString(\"test(123)\")[0])\n self.assertEqual(\"123\", grammar._VALUE.parseString(\"123\")[0])\n self.assertEqual(\"*\", grammar._VALUE.parseString(\"*\")[0])\n self.assertEqual(\"'test test'\", grammar._VALUE.parseString(\"'test test'\")[0])\n self.assertEqual(\"'test, test'\", grammar._VALUE.parseString(\"'test, test'\")[0])\n\n self.assertEqual([\"test\", \"test(*)\", \"123\", \"'test, test'\"],\n grammar._VALUE_LIST.parseString(\"test, test(*), 123, 'test, test'\").asList())", "def parse_multivector(self, mv_string: str) -> 'MultiVector':\n # Get the names of the canonical blades\n blade_name_index_map = {name: index for index, name in enumerate(self.names)}\n\n # Clean up the input string a bit\n cleaned_string = re.sub('[()]', '', mv_string)\n\n # Create a multivector\n mv_out = MultiVector(self)\n\n # Apply the regex\n for m in _blade_pattern.finditer(cleaned_string):\n # Clean up the search result\n cleaned_match = m.group(0)\n\n # Split on the '^'\n stuff = cleaned_match.split('^')\n\n if len(stuff) == 2:\n # Extract the value of the blade and the index of the blade\n blade_val = float(\"\".join(stuff[0].split()))\n blade_index = blade_name_index_map[stuff[1].strip()]\n mv_out[blade_index] = blade_val\n elif len(stuff) == 1:\n # Extract the value of the scalar\n blade_val = float(\"\".join(stuff[0].split()))\n blade_index = 0\n mv_out[blade_index] = blade_val\n return mv_out", "def test_lspci_mmv_ubuntu_20_10(self):\n self.assertEqual(jc.parsers.lspci.parse(self.f_in['ubuntu_20_10_lspci_mmv'], quiet=True),\n self.f_json['ubuntu_20_10_lspci_mmv'])", "def _parseVec(self, str):\r\n\t\tvec = []\r\n\t\tsplt = str.split()\r\n\t\tfor i in range(0,len(splt)):\r\n\t\t\tvec.append(self._parseNumber(splt[i]))\r\n\t\treturn vec", "def VtVariant(list):\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_VARIANT, list)", "def parse(v, cpy):\n if v.samples[0]['GT'][cpy] == 0: # Not present in this copy\n return None\n alt = v.samples[0].alleles[cpy]\n l_r, l_a = len(v.ref), len(alt)\n if l_r == 1:\n if l_a == 1:\n op, op_len = 'X', 0\n else:\n op, op_len = 'I', l_a - l_r\n elif l_a == 1:\n op, op_len = 'D', l_r - l_a\n else:\n raise ValueError(\"Complex variants present in VCF. Please filter or refactor these.\")\n\n return Variant(v.pos, v.ref, v.samples[0].alleles[cpy], op, op_len)", "def parse_results(variants):\n out = []\n\n # set header\n lines = variants[0].get_output().split('\\n')\n for line in lines[:-1]:\n out.append(line.split('\\t')[0])\n\n # append output for all variants to single list\n for var in variants:\n lines = var.get_output().split('\\n')\n for i in range(0, len(lines) - 1):\n out[i] += '\\t{}'.format(lines[i].split()[1])\n\n return out", "def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:\n if len(string_list) != 4:\n logger.error(\"convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!\",\n string_list)\n return numpy.zeros(shape=(self.word2vec_embedding_size +\n self.word2vec_embedding_size / 2 +\n self.word2vec_embedding_size / 3 +\n self.word2vec_embedding_size / 4,),\n dtype=\"float32\"\n )\n ret = numpy.zeros(shape=(0,), dtype=\"float32\")\n for i, token in enumerate(string_list):\n logger.trace(\"Process the {}. 
token \\\"{}\\\"\", (i + 1), string_list[i])\n ret = numpy.concatenate([ret,\n numpy.average(\n numpy.reshape(\n self.word2vec_dict.get(string_list[i],\n numpy.negative(\n numpy.ones(\n shape=(self.word2vec_embedding_size,),\n dtype=\"float32\")\n )),\n (int(self.word2vec_embedding_size / (i + 1)), (i + 1))\n ),\n axis=1)],\n axis=0)\n return ret", "def parsePresetStrings(ps_list):\n\n return [parsePreset(ps) for ps in ps_list]", "def _parse_samples(sample_list, vcf_header):\n if sample_list:\n if isinstance(sample_list, str) or isinstance(sample_list, int):\n sample_list = [str(sample_list)]\n\n for sample in sample_list:\n if sample not in vcf_header:\n raise ValueError('\"{}\" not found in this VCF'.format(sample))\n else:\n sample_list = []\n\n return sample_list", "def test_parse_version(self):\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B110SPC003'),\n [100, 1, 0, 110, 3],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012'),\n [100, 1, 0, 60, 12],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012_FB_3'),\n [100, 1, 0, 60, 12],\n )\n # Incorrect number of digits\n self.assertEqual(\n _parse_sw_version('BaiStation_V10R001C00B060SPC012'),\n None,\n )\n self.assertEqual(\n _parse_sw_version('XYZ123'),\n None,\n )\n self.assertEqual(\n _parse_sw_version(''),\n None,\n )", "def parse_required_vector_value(node, tag_name, message, cast):\n try:\n data = node.getElementsByTagName(tag_name)[0].firstChild.data\n tmp = str(data).split()\n ret = [cast(i) for i in tmp]\n except (IndexError, ValueError):\n sys.exit(message)\n\n if len(ret) == 0:\n sys.exit(message)\n\n return ret", "def parse_vbscript(self, vbscript_str):\n\n if not self.compiled_vbs_var_regex:\n self.compile_regex()\n\n var_matches_all_groups = self.compiled_vbs_var_regex.findall(vbscript_str)\n \n var_matches = []\n for group in var_matches_all_groups:\n var_matches.append(group[0])\n \n var_matches = list(set(var_matches))\n\n func_matches = self.compiled_vbs_func_regex.findall(vbscript_str)\n func_matches = list(set(func_matches))\n\n return {\"var\": var_matches, \"func\": func_matches}", "def parse_ivar_variants(tsv_filename, allow_missing=True):\n\n if file_is_missing(tsv_filename, allow_missing):\n return { 'variants': [] }\n\n variants = []\n\n # Skip first line\n for line in open(tsv_filename).readlines()[1:]:\n t = line.split('\\t')\n assert (len(t) == 19) or (len(t) == 20) # added POS_AA column\n\n if t[3] != '':\n variants.append(f\"{t[2]}{t[1]}{t[3]}\")\n\n return { 'variants': variants }", "def parseString(self, val):\n \n if not isinstance(val, str):\n raise Exception('Input must be a string!')\n if len(val) < 9:\n raise Exception( 'ESDT Names must be 9 characters!' 
)\n self.setType( val[:2] )\n self.setTime( val[2] )\n self.setFrequency( val[3] )\n self.setHRes( val[4] )\n self.setVRes( val[5] )\n self.setGroup( val[6:9] )\n tmp = val.split('.')\n if len(tmp) == 4:\n self.setVersion( *tmp[1:] )", "def test_lspci_nmmv_ubuntu_20_10(self):\n self.assertEqual(jc.parsers.lspci.parse(self.f_in['ubuntu_20_10_lspci_nmmv'], quiet=True),\n self.f_json['ubuntu_20_10_lspci_nmmv'])", "def parse_victronphoenix(hex_str, port=None):\n\n b = bytes.fromhex(hex_str)\n val = struct.unpack('<BbHHHhHHHHHBBHHHBBBBBx', b)\n\n data = {\n # 0 msgtype\n # 1 msg_ver\n\n #MPPT\n # 2 uint16 mainVoltage_V; // mV\n # 3 uint16 panelVoltage_VPV; // mV ( value needs to be divided by 10 )\n # 4 uint16 panelPower_PPV; // W\n # 5 int6 batteryCurrent_I; // mA ( value needs to be divided by 10 )\n # 6 uint16 yieldTotal_H19; // 0.01 kWh\n # 7 uint16 yieldToday_H20; // 0.01 kWh\n # 8 uint16 maxPowerToday_H21; // W\n # 9 uint16 yieldYesterday_H22; // 0.01 kWh\n # 10 uint16 maxPowerYesterday_H23; // W\n # 11 uint8 errorCode_ERR;\n # 12 uint8 stateOfOperation_CS;\n\n #Phoenix\n # 13 uint16_t p_V; // mV\n # 14 uint16_t p_AC_OUT_V;\n # 15 uint16_t p_AC_OUT_S;\n # 16 uint8_t p_AC_OUT_I;\n # 17 uint8_t p_WARN; // Same as ar but for now can be multiple bits\n # 18 uint8_t p_AR; // alarm convert to 8 bit\n # 19 uint8_t p_CS; // convert to 8 bit\n # 20 uint8_t p_MODE;\n\n 'mpptmainvoltage': val[2],\n 'mpptpanelvoltage': val[3]/10,\n 'mpptpanelpower': val[4],\n 'mpptbatterycurrent': val[5]/10,\n 'mpptyieldTotal': val[6],\n 'mpptyieldToday': val[7],\n 'mpptmaxPowerToday': val[8],\n 'mpptyieldYesterday': val[9],\n 'mpptmaxPowerYesterday': val[10],\n 'mppterrorcode': val[11], # int\n 'mpptstate': val[12], # int\n\n 'p_V': val[13],\n 'p_AC_OUT_V': val[14],\n 'p_AC_OUT_S': val[15],\n 'p_AC_OUT_I': val[16],\n 'p_WARN': val[17],\n 'p_AR': val[18],\n 'p_CS': val[19],\n 'p_MODE': val[20]\n }\n return data", "def parse_string(str_arr):\n def to_arr(str_arr):\n \"\"\" Switch to list. \"\"\"\n row = str_arr.replace(']', '').\\\n replace('[', '').\\\n replace('{', '').\\\n replace('}', '').\\\n replace('\\n', '').split()\n\n if '+-' in row:\n row = kludge_gvars(row)\n row = [gv.gvar(str(elt)) for elt in row]\n return np.array(row)\n\n def kludge_gvars(mangled):\n \"\"\"\n Occasionally, gvars get rendered to strings as, e.g.,\n -4e-06 +- 1 instead of -0.000006(1.0). This makes a\n complete mess of trying to parse the a list of gvar\n which has been turned into a string, e.g.,\n '[1(2) 1 +- 2 0.003(2)]', since the usual str.split()\n separates '1 +- 2' --> ['1','+-','2']. 
This function is\n a kludge which works around this difficulty.\n \"\"\"\n # Loop in reverse looking for '+-', but don't run off the end\n for idx in range(len(mangled) - 1)[::-1]:\n if mangled[idx + 1] == '+-':\n reunited = ' '.join(mangled[idx:idx + 3])\n # Throw away the used elements...\n for _ in range(3):\n mangled.pop(idx)\n # Repair the list with reunited gvar string\n mangled.insert(idx, reunited)\n return mangled\n\n return to_arr(str_arr)", "def parse_snmp_response(response, type):\n values = []\n root = etree.fromstring(response)\n body = root.findall('{%s}Body'%'http://schemas.xmlsoap.org/soap/envelope/')\n for b in body:\n message = b.findall('{%s}message'%'http://ggf.org/ns/nmwg/base/2.0/')\n for m in message:\n data = m.findall('{%s}data'%'http://ggf.org/ns/nmwg/base/2.0/')\n for d in data:\n datum = d.findall('{%s}datum'%'http://ggf.org/ns/nmwg/base/2.0/')\n for d2 in datum:\n #to check this is not an error message\n if d2.text != '':\n if d2.attrib['value'] != '' and d2.attrib['value'] != None and d2.attrib['value'] != 'nan':\n v = {}\n v['timeValue'] = datetime.fromtimestamp(float(d2.attrib['timeValue']))\n v['value']=d2.attrib['value']\n if type!=\"lamp\":\n v['valueUnits'] = d2.attrib['valueUnits']\n values.append(v)\n\n return values", "def test_vpn_string_conversion():\n for v in vpns:\n assert len(str(v)) > 0", "def test_transform_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n SCATTER bigfoot X TRANSFORM 1.2 Y temperature_mid\n \"\"\"\n\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_name_to_variant_long():\n genome = MockGenomeTestFile(\n db_filename='hg19.fa',\n filename='pyhgvs/tests/data/test_hgvs.genome',\n create_data=False)\n\n # Read transcripts.\n with open('pyhgvs/data/genes.refGene', 'r') as infile:\n transcripts = read_transcripts(infile)\n\n class NoTranscriptError(Exception):\n pass\n\n def get_transcript_long(name):\n \"\"\"Return a transcript name for the long test.\"\"\"\n transcript = transcripts.get(name)\n if not transcript:\n raise NoTranscriptError(name)\n chrom = transcript.tx_position.chrom\n\n # Skip alternative haplotypes.\n if '_' in chrom:\n raise NoTranscriptError(name)\n\n # Skip sex chromosomes.\n if chrom in ('', 'chrX', 'chrY'):\n raise NoTranscriptError(name)\n\n return transcript\n\n errors = []\n with open('pyhgvs/tests/data/test_hgvs.txt', 'r') as infile:\n for i, line in enumerate(infile):\n row = line.rstrip().split('\\t')\n chrom, offset, ref, alt, hgvs_name = row[:5]\n offset = int(offset)\n\n try:\n hgvs_variant = parse_hgvs_name(\n hgvs_name, genome, get_transcript=get_transcript_long)\n except NoTranscriptError:\n continue\n\n unnorm_variant = (chrom, offset, ref, alt)\n chrom, offset, ref, alts = normalize_variant(\n chrom, offset, ref, [alt], genome).variant\n variant = (chrom, offset, ref, alts[0])\n\n if hgvs_variant != variant:\n errors.append(repr([hgvs_variant, variant,\n unnorm_variant, hgvs_name]))\n\n assert not errors, '\\n'.join(errors)", "def parse_lspci_vv_chk_error(output,raiseOnErrors = \"1\"):\n \n found_devSta = 0\n \n #sys.exit(1)\n if re.search(\"DevSta\",output):\n found_devSta = 1\n \n # remove DevStat after splitting it\n l_a = output.split(\":\")\n l_a1 = l_a[1].split()\n for m in l_a1:\n \n # if ends with +, \n if re.search(\"Err\",m):\n if re.search(\".*\\+$\",m):\n \n print \"-\" * 8\n \n errorStr = \"Found + in lspci output for '%s' , line details '%s'\"%(m,output)\n trace_error(errorStr)\n if raiseOnErrors == \"1\":\n raise 
ViriError(errorStr)\n\n return 2\n \n if found_devSta == 0:\n raise ViriError(\"Did not find 'devSta' in the output %s\"%output)\n\n trace_info(\"No lspci correctable or uncorrectable issues seem to be present , output '%s'\"%output)\n return 1", "def parseKVs(self, kvl):\n \n if isinstance(kvl, str):\n return Parsing.parseKVs(kvl)\n\n od = collections.OrderedDict()\n if kvl is not None:\n for i in kvl:\n if isinstance(i, str):\n k, v, junk = Parsing.parseKV(i)\n od[k] = v\n elif type(i) in (list, tuple) and len(i) == 2:\n k, v, junk = Parsing.parseKV(\"%s=%s\" % i)\n else:\n CPL.log('Reply', 'kvl item is not a string: %r' % (i))\n raise Exception(\"kvl == %r\" % (i))\n\n return od", "def test_xyzp_qm_7a():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='psi4')", "def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))", "def parse_lens_response_str(response):\n\n import re\n\n result = dict(MOSI=[], MISO=[], timedOut=False, interface_error=False)\n result['return_str'] = response\n\n for line in response:\n\n if \"Error\" in line:\n result[\"interface_error\"] = True\n\n if 'Timed out' in line:\n result['timedOut'] = True\n\n if 'fast mode' in line:\n result['fastMode'] = 1\n\n if 'slow mode' in line:\n result['fastMode'] = 0\n\n for ch in ['MOSI', 'MISO']:\n if ch in line:\n for match in re.finditer('Ox', line):\n result[ch].append(line[match.start() + 2:match.start() + 4])\n\n return result", "def parse_protocol_version(self, version_string_list):\n # Verify for every provided string if it is in proper versioning format\n for version_string in version_string_list:\n\n try:\n parsed_version_string = version_string.split('.')\n if len(parsed_version_string) == 1 and version_string.isdigit():\n # No dots in version string, it is a simple integer.\n continue\n\n StrictVersion(version_string)\n\n except (AttributeError, ValueError):\n LOG.debug('Invalid protocol version string provided')\n return version_string\n\n # Check for malformatting\n for i in range(len(parsed_version_string)):\n if len(parsed_version_string[i]) > 1:\n if parsed_version_string[i][0] == '0': # Leading 0's\n return version_string\n if len(parsed_version_string[i]) < 1: # Empty strings\n return version_string\n\n # Protocol version formating: OK\n return None", "def ParseRval(rval_content):\n\n lines = rval_content.split('\\n')\n last_line = lines.pop()\n assert last_line == ''\n verdict = NCVAL_VERDICT[lines.pop()]\n\n offsets = set()\n for prev_line, line in zip([None] + lines, lines):\n if line.startswith('VALIDATOR: Checking jump targets:'):\n continue\n if line.startswith('VALIDATOR: Checking that basic blocks are aligned'):\n continue\n\n # Skip disassembler output of the form\n # VALIDATOR: 0000000000000003: 49 89 14 07 mov [%r15+%rax*1], %rdx\n m = re.match(r'VALIDATOR: ([0-9a-f]+):', line, re.IGNORECASE)\n if m is not None:\n continue\n\n 
# Parse error message of the form\n # VALIDATOR: ERROR: 20: Bad basic block alignment.\n m = re.match(r'VALIDATOR: ERROR: ([0-9a-f]+): (.*)', line, re.IGNORECASE)\n if m is not None:\n offset = int(m.group(1), 16)\n offsets.add(offset)\n continue\n\n # Parse two-line error messages of the form\n # VALIDATOR: 0000000000000003: 49 89 14 07 mov [%r15+%rax*1], %rdx\n # VALIDATOR: ERROR: Invalid index register in memory offset\n m = re.match(r'VALIDATOR: (ERROR|WARNING): .*$', line, re.IGNORECASE)\n if m is not None:\n message_type = m.group(1)\n assert prev_line is not None, (\n \"can't deduce error offset because line %r \"\n \"is not preceded with disassembly\" % line)\n m2 = re.match(r'VALIDATOR: ([0-9a-f]+):', prev_line, re.IGNORECASE)\n assert m2 is not None, \"can't parse line %r preceding line %r\" % (\n prev_line,\n line)\n offset = int(m2.group(1), 16)\n if message_type != 'WARNING':\n offsets.add(offset)\n continue\n\n raise AssertionError(\"can't parse line %r\" % line)\n\n return ValidatorResult(verdict=verdict, offsets=offsets)", "def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]" ]
[ "0.55300385", "0.54785925", "0.5428499", "0.54011434", "0.53826064", "0.5340443", "0.5202114", "0.5156769", "0.51507455", "0.5124695", "0.5124507", "0.5102875", "0.5073347", "0.5016438", "0.49852303", "0.49693668", "0.4961004", "0.49565905", "0.495055", "0.49437034", "0.49418756", "0.49209923", "0.49196208", "0.49086615", "0.49005666", "0.4884244", "0.48687154", "0.48657942", "0.4859129", "0.48515713" ]
0.6759886
0
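A hypothetical usage sketch for the parse_variant_strings record above. It assumes the record's function is in scope together with the Variant and MaveHgvsParseError names from the mavehgvs package, as the record's type hints suggest; the example variant strings are illustrative only.

variants = ["c.1A>G", "g.44del", "not a variant"]  # illustrative inputs
valid, invalid = parse_variant_strings(variants, expected_prefix="c")
for s, v, err in zip(variants, valid, invalid):
    if v is not None:
        print(f"parsed {s!r} with prefix {v.prefix!r}")
    else:
        # "g.44del" parses but fails the prefix check; the last string fails parsing
        print(f"rejected {s!r}: {err}")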
Generate a batch of binary masks for data.
def _generate_masks(self, data, batch_size): height, width = data.shape[2], data.shape[3] mask_size = (self._down_sample_size, self._down_sample_size) up_size = (height + mask_size[0], width + mask_size[1]) mask = np.random.random((batch_size, 1) + mask_size) < self._mask_probability upsample = resize(op.Tensor(mask, data.dtype), up_size, self._resize_mode).asnumpy() shift_x = np.random.randint(0, mask_size[0] + 1, size=batch_size) shift_y = np.random.randint(0, mask_size[1] + 1, size=batch_size) masks = [sample[:, x_i: x_i + height, y_i: y_i + width] for sample, x_i, y_i in zip(upsample, shift_x, shift_y)] masks = Tensor(np.array(masks), data.dtype) return masks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bits():\n for d in data:\n for i in [5, 4, 3, 2, 1, 0]:\n yield (d >> i) & 1", "def test_get_mask(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n mask = spine_data_loader.get_mask(str(idx))\n assert mask.shape == (256, 256, 1)\n assert mask.dtype == 'int64'", "def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)", "def apply_mask(data, mask):\n if len(mask) != 4:\n raise ValueError(\"mask must contain 4 bytes\")\n\n return bytes(b ^ m for b, m in zip(data, itertools.cycle(mask)))", "def mask(self):\n return list(self._mask_generator())", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def encode(self, data, mask, batch_size=128):\n features = np.zeros((len(data), self.feature_dim))\n n_batches = int(len(data) / batch_size)\n\n for i in tqdm(range(n_batches)):\n cur_data = data[i * batch_size:(i + 1) * batch_size]\n cur_mask = mask[i * batch_size:(i + 1) * batch_size]\n features[i * batch_size:(i + 1) * batch_size] = self.encode_fn(cur_data, cur_mask)\n if n_batches * batch_size < len(data):\n cur_data = data[n_batches * batch_size:]\n cur_mask = mask[n_batches * batch_size:]\n features[n_batches * batch_size:] = self.encode_fn(cur_data, cur_mask)\n\n return features", "def signal_masks(simulatedata_cbma):\n _, (ground_truth_foci, dataset) = simulatedata_cbma\n ground_truth_foci_ijks = [\n tuple(mm2vox(focus, dataset.masker.mask_img.affine)) for focus in ground_truth_foci\n ]\n return _create_signal_mask(np.array(ground_truth_foci_ijks), dataset.masker.mask_img)", "def _make_masks(ilens, olens):\n # (B, T_in)\n in_masks = make_non_pad_mask(ilens)\n # (B, T_out)\n out_masks = make_non_pad_mask(olens)\n # (B, T_out, T_in)\n\n return paddle.logical_and(\n out_masks.unsqueeze(-1), in_masks.unsqueeze(-2))", "def model_masks(self, prunable=None):\n # TODO Also accept a dataloader\n pass\n # return masks", "def _prepare_mask_file(mask):\n result = np.ndarray((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n\n if mask[i][j] > 0:\n result[i][j] = 1\n else:\n result[i][j] = 0\n \n return result", "def gen_raw_from_bitified(data, logical_bit):\n for bit in data:\n yield bit * logical_bit", "def generate_default_mask(data, dim1=None):\n batch_size = data.size(1)\n sequence_len = data.size(0)\n if dim1 is None:\n dim1 = sequence_len\n return torch.zeros(batch_size, dim1, sequence_len).bool().to(data.device)", "def generate_mask(data, tps, length, tp_union):\n tp_map = {tp_union[i].item(): i for i in range(len(tp_union))}\n\n mask = np.zeros((data.shape[0], tp_union.shape[0]))\n e_data = torch.zeros((data.shape[0], tp_union.shape[0], data.shape[2]))\n e_data = e_data.to(data.device)\n r_arr = []\n\n for i in range(len(mask)):\n inds = [tp_map[tps[i][j].item()] for j in range(length[i])]\n mask[i, inds] = 1\n e_data[i, inds] = data[i, :length[i]]\n r_arr.append(np.where(mask[i] == 1)[0])\n\n return mask, e_data, r_arr", "def gen_masks(num_masks, features, hidden_layers, hidden_units):\n\n # This array should contain numbers 1-784\n 
features_indices = []\n for i in range(features):\n features_indices.append(i + 1)\n masks = []\n indices = []\n for i in range(num_masks):\n set_masks = [] # Will contain all masks for the set\n # Randomize the input (and output, since they have to be the same)\n # ordering\n set_features = [] # Input and output node indices for the set\n for index in features_indices:\n set_features.append(index)\n np.random.RandomState(np.random.randint(0, 2**32)).shuffle(\n set_features)\n indices.append(set_features)\n prev_indices = set_features\n for j in range(hidden_layers):\n layer_indices = []\n for k in range(hidden_units):\n # The hidden nodes' indices need to be between the minimum\n # index from the previous layer and one less than the number\n # of features, inclusive.\n layer_indices.append(np.random.randint(low=min(prev_indices),\n high=features))\n mask = np.zeros((len(prev_indices), len(layer_indices)),\n dtype=np.float32)\n for k in range(len(prev_indices)):\n for l in range(len(layer_indices)):\n # The mask value will be one when the autoregressive\n # condition is met.\n mask[k][l] = float(int(prev_indices[k] <= layer_indices[l]))\n mask = tf.convert_to_tensor(mask, dtype=tf.float32)\n set_masks.append(mask)\n prev_indices = layer_indices\n output_mask = np.zeros((len(prev_indices), features), dtype=np.float32)\n for j in range(len(prev_indices)):\n for k in range(len(set_features)):\n output_mask[j][k] = float(int(prev_indices[j] < set_features[k]))\n output_mask = tf.convert_to_tensor(output_mask, dtype=tf.float32)\n set_masks.append(output_mask)\n direct_mask = np.zeros((features, features), dtype=np.float32)\n for j in range(features):\n for k in range(features):\n direct_mask[j][k] = float(int(set_features[j] < set_features[k]))\n direct_mask = tf.convert_to_tensor(direct_mask, dtype=tf.float32)\n set_masks.append(direct_mask)\n masks.append(set_masks)\n return{'masks': masks, 'indices': indices}", "def generate_padding_masks(data, pad_value=0):\n with torch.no_grad():\n mask = (data == pad_value).to(data.device).t().unsqueeze(1)\n return mask", "def apply_mask(data: bytes, mask: bytes) -> bytes:\n if len(mask) != 4:\n raise ValueError(\"mask must contain 4 bytes\")\n\n return bytes(b ^ m for b, m in zip(data, itertools.cycle(mask)))", "def make_mask(data, pad):\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def random_masks(self):\n # initialize mask\n mask = np.ones((3, self.dim, self.dim))\n\n # generate one of 4 random masks\n choose = 1 # np.random.randint(0, 1)\n if choose == 0:\n mask[:, :self.dim // 2] = 0\n elif choose == 1:\n mask[:, :, :self.dim // 2] = 0\n elif choose == 2:\n mask[:, :, self.dim // 2:] = 0\n elif choose == 3:\n mask[:, self.dim // 2:] = 0\n\n return mask", "def make_mask(data, pad):\n\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. 
\"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def __get_masks(x_shape, y, n_train=None):\n # type: (Tuple[int], np.ndarray, int) -> (np.ndarray, np.ndarray)\n n_train = n_train if n_train is not None else const.n_train\n\n if n_train <= 0 or n_train > x_shape[0]:\n return np.full(shape=x_shape, fill_value=True, dtype=bool), np.full(shape=y.shape, fill_value=True, dtype=bool)\n\n all_indexes = defaultdict(list) # type: Dict[int, List[int]]\n for i in range(len(y)):\n curr = int(y[i])\n all_indexes[curr].append(i)\n\n ratios = defaultdict() # type: Dict[int, float]\n\n for i, j in all_indexes.items():\n ratios[i] = (len(j) * 1. / len(all_indexes[0]))\n\n # Ratios split the whole dataset to ratios given class and first class.\n # Part scales these ratios up, so that, 'part' corresponds to size of first class.\n part = n_train * 1. / sum(ratios.values())\n if part == 0: # n_train is 0.\n part = len(y) * 1. / sum(ratios.values())\n\n # Masks of what to keep.\n indexes_x = np.full(shape=x_shape, fill_value=False, dtype=bool)\n indexes_y = np.full(shape=y.shape, fill_value=False, dtype=bool)\n\n for i in all_indexes.keys():\n chosen_idxs = random.sample(all_indexes[i], int(part * ratios[i]))\n indexes_y[chosen_idxs] = True\n indexes_x[chosen_idxs, ...] = True\n\n return indexes_x, indexes_y", "def get_label_masks(self, vocabs, language):\n fn = 'data/{}/conll09/train.txt'.format(language)\n lemma_to_preds = get_lemma_to_preds(fn)\n masks = np.zeros((vocabs['plemmas'].size, vocabs['predicates'].size),\n dtype=np.float32)\n for i, lemma in vocabs['plemmas'].idx_to_word.iteritems():\n if lemma in lemma_to_preds:\n preds = lemma_to_preds[lemma]\n idxs = vocabs['predicates'].encode_sequence(preds)\n for j in idxs:\n masks[i][j] = 1.0\n else:\n masks[i, :] = 1.0 # Allow everything\n return masks", "def build_attention_mask(input_ids): \n attention_masks = [] \n\n # 1 for input and 0 for pad\n for seq in input_ids: \n attention_masks.append([float(i>0) for i in seq])\n\n return attention_masks", "def _find_masks(batch, min_size=10):\n result = []\n for b in batch:\n assert b.shape[0] == 1\n patch = b[0]\n z_sum = patch.sum(axis=(1, 2))\n coords = np.where(z_sum > min_size)[0]\n if len(coords) > 0:\n ind = coords[len(coords) // 2]\n result.append(b[:, ind:ind + 1, ...])\n else:\n ind = b.shape[1] // 2\n result.append(b[:, ind:ind + 1, ...])\n\n return np.stack(result, axis=0)", "def get_masks(data):\n return [patient[0] for i, patient in enumerate(data) if i in good_patients]", "def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask", "def create_all_mask(mask, num, stride):\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)", "def get_data_mask(self):\n if self._cached_mask is None:\n self._cached_mask = ones(shape(self._data), bool)\n return self._data, self._cached_mask", "def _generateResults_combosToBitmasks(self, all, want_combos):\n names = {}\n iter = enumerate(all)\n try:\n i, name = 
next(iter)\n while True:\n names[name] = (1 << i)\n i, name = next(iter)\n except StopIteration:\n pass\n print \"combosToBitmasks names:\", names\n results = []\n for combo in want_combos:\n mask = 0\n for name in combo:\n if name in names:\n mask |= names[name]\n results.append(mask)\n return results", "def __data_generation(self, image_mask_dirs): # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n # Generate data\n for i, dirs in enumerate(image_mask_dirs):\n # Store image\n x_img = cv2.imread(dirs[0])\n X[i,] = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n\n # Store mask\n y_img = cv2.imread(dirs[1], cv2.IMREAD_GRAYSCALE).reshape((*self.dim, 1))\n y[i,] = y_img\n\n if self.preprocessor is not None:\n X = self.preprocessor(X)\n y = self.preprocessor(y)\n\n X = X.astype('float32')\n X /= 255\n y = y.astype('float32')\n y /= 255\n\n return X, y" ]
[ "0.65286094", "0.64621294", "0.63746774", "0.62852716", "0.6263692", "0.6249675", "0.6237803", "0.62282276", "0.6218766", "0.62182695", "0.6199647", "0.6149319", "0.6104692", "0.6101146", "0.60662824", "0.6061052", "0.60520583", "0.6050134", "0.6042464", "0.60416996", "0.6024597", "0.60163856", "0.6012869", "0.60072154", "0.59626585", "0.59147644", "0.5914565", "0.59137726", "0.58738375", "0.5849612" ]
0.7697249
0
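A framework-free sketch of the random-mask generation in the record above: the MindSpore resize/Tensor calls are replaced with plain NumPy nearest-neighbour upsampling, while the coarse Bernoulli grid, the oversized canvas, and the random-shift cropping follow the record's logic. All names here are illustrative.

import numpy as np

def generate_masks(batch_size, height, width, cell=7, p=0.5, rng=None):
    rng = rng or np.random.default_rng()
    grid = rng.random((batch_size, cell, cell)) < p        # coarse Bernoulli grid
    up_h, up_w = height + cell, width + cell               # oversized canvas
    ups = grid.repeat(up_h // cell + 1, axis=1)[:, :up_h]  # nearest-neighbour upsample
    ups = ups.repeat(up_w // cell + 1, axis=2)[:, :, :up_w]
    xs = rng.integers(0, cell + 1, size=batch_size)        # random crop offsets
    ys = rng.integers(0, cell + 1, size=batch_size)
    return np.stack([m[x:x + height, y:y + width]
                     for m, x, y in zip(ups, xs, ys)]).astype(np.float32)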
Unify targets into a 2D numpy.ndarray.
def _unify_targets(inputs, targets): if isinstance(targets, int): return np.array([[targets] for _ in inputs]).astype(np.int) if isinstance(targets, Tensor): if not targets.shape: return np.array([[targets.asnumpy()] for _ in inputs]).astype(np.int) if len(targets.shape) == 1: return np.array([[t.asnumpy()] for t in targets]).astype(np.int) if len(targets.shape) == 2: return np.array([t.asnumpy() for t in targets]).astype(np.int) return targets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def targets(self) -> Optional[jnp.ndarray]:\n pass", "def target_array(self):\n target_dtype = np.dtype([('targetID', np.int64),\n ('x', np.float64),\n ('y', np.float64),\n ('z', np.float64),\n ('priority', np.int32),\n ('fiberType', np.string_, 30)])\n\n target_array = np.zeros(len(self.targetDict), dtype=target_dtype)\n i = 0\n for k, td in self.targetDict.items():\n td = self.targetDict[k]\n target_array['targetID'][i] = td.id\n target_array['x'][i] = td.xWok\n target_array['y'][i] = td.yWok\n target_array['z'][i] = td.zWok\n target_array['priority'][i] = td.priority\n ft = fiberType2Str[td.fiberType]\n target_array['fiberType'][i] = ft\n i = i + 1\n return(target_array)", "def transform(self, x: Array2D) -> Array2D:", "def package_density_matrices(y: Array) -> Array:\n # As written here, only works for (n,n) Arrays\n obj_arr = np.empty(shape=(1), dtype=\"O\")\n obj_arr[0] = y\n return obj_arr", "def map_targets(y, mapping=None):\r\n y_converted = []\r\n\r\n if mapping is None:\r\n y_converted = y\r\n else:\r\n if isinstance(mapping, list) or isinstance(mapping, (np.ndarray, np.generic)):\r\n if isinstance(y[0], list) or isinstance(y[0], (np.ndarray)): # if nested targets\r\n y_converted = y.copy()\r\n\r\n print(\"array of array1\")\r\n\r\n for indy, y_tmp in enumerate(y):\r\n y_converted[indy] = mapping[y_tmp]\r\n else: # if list\r\n print(\"array1\")\r\n\r\n y_converted = np.array(mapping[y])\r\n\r\n elif isinstance(mapping, dict):\r\n if isinstance(y[0], list) or isinstance(y[0], (np.ndarray)): # if nested targets\r\n y_converted = y.copy()\r\n\r\n print(\"array of array2\")\r\n for indy, y_tmp in enumerate(y):\r\n y_converted[indy] = [mapping.get(y_tmp2) for y_tmp2 in y_tmp]\r\n else:\r\n print(\"array2\")\r\n\r\n y_converted = np.array([mapping.get(y_tmp) for y_tmp in y])\r\n else:\r\n raise TypeError('y must be list, ndarray, dict or None')\r\n\r\n return y_converted", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def _as_numpy(y):\n if y is None:\n return None\n elif isinstance(y, np.ndarray):\n return np.copy(y)\n elif hasattr(y, 'as_matrix'):\n return y.as_matrix()\n elif hasattr(y, 'tolist'):\n return y.tolist()\n elif is_iterable(y):\n return np.asarray([i for i in y]) # might accidentally force object type in 3\n raise TypeError('cannot convert type %s to numpy ndarray' % type(y))", "def atleast_2d(x):\n return np.atleast_2d(x).T if x.ndim < 2 else x", "def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )", "def make_2d(x):\n return x.reshape((1, len(x)))", "def to_numpy(x):\r\n return x.squeeze().detach().cpu().numpy()", "def to_numpy(self, **kwargs):\n pass", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def generate_array(hdulist, features, targets):\n \"\"\"\n #To do:\n -get targets and features via .conf file\n -> use field_names = hdulist_test[1].columns.names\n \"\"\"\n\n '''extract data'''\n astro_data = hdulist[1].data\n\n '''get all float like and feature matching data'''\n data_float = np.squeeze(np.array([astro_data.field(0)]))\n for x in range(0, len(astro_data[0])):\n if isinstance(astro_data.field(x)[1], (int, float, complex)) is True\\\n and x not in targets and x in features:\n data_float = np.vstack((data_float, 
np.squeeze(np.array([astro_data.field(x)]))))\n '''get all and target matching data'''\n targets_float = np.squeeze(np.array(astro_data.field(targets[0])))\n for x in range(len(targets)):\n targets_float = np.vstack((targets_float, np.squeeze(np.array(astro_data.field(targets[x])))))\n print('Selected Feature: ' + hdulist_test[1].columns.names[targets[x]])\n '''return'''\n return data_float, targets_float", "def column_or_row_or_1d(y: npt.NDArray) -> npt.NDArray:\n shape = np.shape(y)\n if (len(shape) == 1) or (len(shape) == 2 and (shape[1] == 1 or shape[0] == 1)):\n return np.ravel(y)\n else:\n raise ValueError(\"bad input shape {0}\".format(shape))", "def getArray2d(self):\n\t\treturn self.array2d", "def test_to_ndarray(ndim):\n workdir = os.path.join(os.getenv(\"HOME\"), \"travis_short_workdir\")\n args_list = [\"--workdir\", workdir]\n parser, args_remaining = common_args(\"test_model_config\", \"test_problem\", args_list)\n args = parser.parse_args(args_remaining)\n config = read_cfg_file(args)\n ModelConfig(config[\"modelinfo\"])\n\n arg_in_shape = tuple(range(3, 3 + ndim))\n arg_in = np.full(arg_in_shape, RegionScalars(1.0))\n expected_shape = arg_in_shape + (1,)\n expected = np.full(expected_shape, [1.0])\n\n result = to_ndarray(arg_in)\n assert result.shape == expected.shape\n assert np.all(result == expected)", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def _evaluate(self, targets: jnp.ndarray) -> jnp.ndarray:\n pass", "def targets(self) -> Tensor:\n data_normalized = self._dataset_values.clone()\n data_normalized -= self._row_min\n data_normalized /= self._row_range\n _, outputs_normalized_transformed = self._transform(\n data_normalized[:, self._input_column_indices],\n data_normalized[:, self._output_column_indices],\n )\n assert outputs_normalized_transformed.ndim == 1 or (\n outputs_normalized_transformed.ndim == 2\n and outputs_normalized_transformed.size(1) == 1\n ), \".targets requires exactly one output column (to match TorchVision datasets).\"\n return outputs_normalized_transformed.view(-1).clone()", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def test_targets(iris):\n assert iris.num_targets == 3\n np.testing.assert_array_equal(\n iris.target_names, [\"setosa\", \"versicolor\", \"virginica\"]\n )", "def get_onehot_targets(self):\n return torch.from_numpy(self.targets)", "def format_mnist_target(data=None):\n data_shape = data.shape\n data = np.reshape(data, (data_shape[0], data_shape[1]))\n return data", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr", "def make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:\n if 
isinstance(x, np.ndarray):\n return x\n if np.isscalar(x):\n return np.array([x])\n if isinstance(x, Tensor):\n return x.detach().cpu().numpy()\n raise NotImplementedError(\n \"Got {}, but numpy array, scalar, or torch tensor are expected.\".format(type(x))\n )", "def __check_2d_and_reshape(X):\n if len(X.shape) == 1:\n X = np.reshape(X, (-1, X.shape[0]))\n return X", "def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))", "def to_multi_label_matrix(target_labels: List[List[str]], label_names: List[str]) -> np.ndarray:\n def map_multi_label_line(line_labels: List[str]) -> List[int]:\n return [1 if label in line_labels else 0 for label in label_names]\n\n return np.array(list(map(map_multi_label_line, target_labels)))" ]
[ "0.6756555", "0.61209065", "0.6060304", "0.60284156", "0.6013608", "0.596247", "0.58169913", "0.58032334", "0.5798457", "0.5793701", "0.5786053", "0.57662976", "0.5763996", "0.5749886", "0.570804", "0.57043284", "0.56917435", "0.56873614", "0.56873614", "0.56604946", "0.5652808", "0.5638846", "0.5628106", "0.5621247", "0.560033", "0.55866", "0.5575612", "0.556463", "0.5543353", "0.55388" ]
0.6536828
1
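The rule implemented by _unify_targets above, restated with plain NumPy in place of MindSpore Tensors (the function name and inputs here are illustrative): whatever form targets takes, the result is an integer array of shape (n_samples, n_targets).

import numpy as np

def unify_targets(n_samples, targets):
    if isinstance(targets, int):                  # one class id for the whole batch
        return np.full((n_samples, 1), targets, dtype=int)
    targets = np.asarray(targets)
    if targets.ndim == 0:                         # scalar array
        return np.full((n_samples, 1), int(targets), dtype=int)
    if targets.ndim == 1:                         # one class id per sample
        return targets.reshape(-1, 1).astype(int)
    return targets.astype(int)                    # already (n_samples, n_targets)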
Compiles a message that can be posted to Slack after a call has been made.
def compile_slack_phone_message(phone_from, phone_to, status, location): call_from_user = _query_user(phone_from) call_from = _format_caller(call_from_user, phone_from) call_to_user = _query_user(phone_to) call_to = _format_caller(call_to_user, phone_to) location_str = list(filter(lambda x: x[0] == location, Located.LOCATION_CHOICES)) if not location_str: logger.error('Unknown café choice: %d' % (location,)) location_str = 'Okänt café' else: location_str = location_str[0][1] fallback = 'Ett samtal till %s från %s har %s.' % ( location_str, call_from, ('blivit taget av %s' if status == 'success' else 'missats av %s') % (call_to,), ) fields = [ { 'title': 'Status', 'value': 'Taget' if status == 'success' else 'Missat', 'short': True }, { 'title': 'Café', 'value': location_str, 'short': True }, { 'title': 'Mottagare', 'value': call_to, 'short': False } ] if call_from_user is not None and call_from_user['groups']: groups = call_from_user['groups'] groups_str = '%s %s tillhör %s: %s.' % ( call_from_user['first_name'], call_from_user['last_name'], 'grupperna' if len(groups) > 1 else 'gruppen', ', '.join(groups) ) fallback += '\n\n%s' % groups_str fields += [ { 'title': 'Grupper', 'value': groups_str, 'short': False } ] return { 'attachments': [ { 'pretext': 'Nytt samtal från %s' % call_from, 'fallback': fallback, 'color': 'good' if status == 'success' else 'danger', 'fields': fields } ] }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_message(ctx, question, answer):\n return preamble.format(channel=rules_channel(ctx).id) + question + answer", "def message(**payload):\n web_client = payload[\"web_client\"]\n\n # Getting information from the response\n data = payload[\"data\"]\n channel_id = data.get(\"channel\")\n text = data.get(\"text\")\n subtype = data.get(\"subtype\")\n ts = data['ts']\n user = data.get('username') if not data.get('user') else data.get('user')\n # Creating a Converstion object\n message = Message(ts, user, text)\n\n # Appending the converstion attributes to the logs\n conversation.append(message.toDict())\n\n if subtype == 'bot_message': return\n\n do_respond(web_client, channel_id, text)", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def compile_slack_sms_message(_sms_from, message):\n sms_from_user = _query_user(_sms_from)\n sms_from = _format_caller(sms_from_user, _sms_from)\n pretext = \"Nytt SMS från %s\" % (sms_from, )\n fallback = \"%s \\n\\\"%s\\\"\" % (pretext, message)\n\n return {\n 'attachments': [\n {\n 'pretext': pretext,\n 'fallback': fallback,\n 'color': 'warning',\n 'text': message\n }\n ]\n }", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def message_of(cfg, ticket, phase):\n return cfg[\"message_template\"] % (ticket, text(cfg, phase))", "def build_message(cmd, data):\r\n\tif len(cmd) > CMD_FIELD_LENGTH or len(data) > MAX_DATA_LENGTH:\r\n\t\treturn None\r\n\tfull_cmd = cmd + \" \"*(CMD_FIELD_LENGTH-len(cmd))\r\n\tdata_len = str(len(data))\r\n\tfull_data_len = \"0\"*(LENGTH_FIELD_LENGTH-len(data_len))+data_len\r\n\tfull_msg = DELIMITER.join([full_cmd, full_data_len, data])\r\n\treturn full_msg", "def do_something(incoming_msg):\n return \"i did what you said - {}\".format(incoming_msg.text)", "def horde_message(self, message):", "def c_message(text):\n string = text\n string = string.replace('_', ' ')\n return \"C {}\".format(string)", "def main():\n # Events from google calendar:\n events = GC.get_events()\n\n message_info = MC.create_message(events, True)\n message_to_post = message_info[0]\n emojis = message_info[1]\n\n # Post message to Slack\n posted_message = SL.send_message(\n sc, channels[\"bot-dev\"], message_to_post, True)\n\n # React the emojis to Slack\n for emoji in emojis:\n SL.emoji_react(sc, emoji, posted_message, False)\n\n correct_message = input(\"Did the message look ok in the #polls channel? If you answer with 'y' it will be posted in the polls channel. If you answer 'gen' it will be posted in general. 
If you answer with 'n' or anything other than the commands shown before, then it will be stopped and not posted any where else.\\n\")\n\n if \"gen\" in correct_message.lower():\n posted_message = SL.send_message(\n sc, channels[\"general\"], message_to_post, True)\n for emoji in emojis:\n SL.emoji_react(sc, emoji, posted_message, False)\n elif \"y\" in correct_message.lower():\n posted_message = SL.send_message(\n sc, channels[\"polls\"], message_to_post, True)\n for emoji in emojis:\n SL.emoji_react(sc, emoji, posted_message, False)", "def build_message(text: str, **kwargs: dict) -> dict:\n\n # Defaults to in_channel\n response_type = 'in_channel'\n if 'response_type' in kwargs and kwargs['response_type'] in response_types:\n response_type = str(kwargs['response_type'])\n\n return {'text': text, 'response_type': response_type}", "def competition(update, context):\n #update.message.reply_text(s)\n chat_id = update.message.chat_id\n bot.send_message(chat_id,text=message,\n parse_mode=telegram.ParseMode.HTML)\n #return s ", "def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)", "def create_next_message(self, **kwargs):\n message = self._builder.create_message(**kwargs)\n return message", "def slackMessage(sMessage):\n sChannel = '#' + getConfig('slack', 'channel')\n print(\"Posting slack message to %s: %s\" % (sChannel, sMessage))\n requests.post(getConfig('slack', 'url'), data=json.dumps({'text': sMessage,\n 'channel': sChannel,\n 'user': getConfig('slack', 'user'),\n 'icon_emoji': getConfig('slack', 'emoji')}))", "def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"", "def post_msg(text):\n client = WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(\n channel=os.environ[\"SLACK_CHANNEL\"],\n text=\"News\",\n blocks=[\n {\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": (text)}}],\n )\n return text", "def generate_msg(props, alert=False, user_pref=None, past=False):\n\t\tmessage = emojize(\":rocket:\", use_aliases=True)\n\t\tif past:\n\t\t\tmessage += ' Launch was held on: ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '.\\n'\n\t\t\tif props['holdreason']:\n\t\t\t\tmessage += 'The launch has been *held*. Reason: ' + props['holdreason'] + '\\n'\n\t\t\tif props['failreason']:\n\t\t\t\tmessage += 'Unfortunately, the launch *failed*. Reason: ' + props['failreason'] + '\\n'\n\t\telse:\n\t\t\tif alert:\n\t\t\t\tmessage += ' *Launch is going to happen in some minutes!* '\n\t\tmessage += ' *' + props['name'] + '*' + '\\n'\n\n\t\tif not alert and not past:\n\t\t\tmessage += 'A launch will happen _' + props['when'].humanize() + '_! 
\\n'\n\t\t\tmessage += 'I mean ' + props['when'].format('YYYY-MM-DD HH:mm:ss ZZ') + '\\n'\n\n\t\tif past:\n\t\t\tmessage += 'Taken from *'\n\t\telse:\n\t\t\tmessage += 'Taking from *'\n\n\t\tmessage += props['location'] + '*.\\n'\n\t\tdescr = Interface.generate_description(props['missions'])\n\t\tmessage += '*Mission description*\\n' + descr + '\\n' if descr else ''\n\t\tmessage += '\\n'\n\n\t\tif props['urls']:\n\t\t\tmessage += 'Watch it here: \\n' if not past else 'You could have watched it here: \\n'\n\t\t\tfor url in props['urls']:\n\t\t\t\tmessage += ' • [' + url + '](' + url +')\\n'\n\t\telse:\n\t\t\tmessage += 'Unfortunately there '\n\t\t\tmessage += 'are' if not past else 'were'\n\t\t\tmessage += ' no reported webcasts ' \\\n\t\t\t\t\t + emojize(':disappointed_relieved:', use_aliases=True)\n\n\t\treturn message", "def create_message(user, conv_id, text, media_url, token):\n\n # method to call if user is part of the conversation\n def create_message(user, conv, response):\n user_alias = conv.get_alias_for_user(user)\n msg = ConvMessages.create(user, user_alias, conv, text, media_url)\n conv.put_message(msg)\n # send new msg to all users in this conv\n broadcast_message(msg, token)\n response['messages'] = msg.get_full_data()\n\n #Send the message to Firebase\n \n #postUrl = 'https://hailing-frequencies-2017.firebaseio.com/messages/' + conv_id + '/'\n #payload =\n #reply = requests.post(postUrl, data=payload)\n\n return response\n\n return process_apicall_checkconv_checkuser(user, conv_id, create_message)", "def format_slack_message(text, response_type):\n data = {\n 'response': response_type,\n 'text': text\n }\n return jsonify(data)", "def newJoin(user, channel):\n\n message = \"\"\"\nWelcome to the official Slack for ZenCash!\n\n\nThe official links are:\nhttps://github.com/ZenCashOfficial/\nhttps://zencashofficial.io/\n\nNOTICE:\nDue to the recent plague that is SlackBot spamming with `/remind`, please do not click any links sent to you in a DM from slackbot.\n\nAdditionally, please copy the message to #spam so the Admins can ban the user.\n\nPlease remember to be civil, and have a great day!\n\"\"\"\n\n\n # General\n if channel == 'C4QGQ8SEM':\n return message\n\n # Bottesting\n if channel == \"C5JCER3NG\":\n return message", "def execute(cls, slack_wrapper, args, channel_id, user_id, user_is_admin):\n try:\n with open(\"intro_msg\") as f:\n message = f.read()\n\n slack_wrapper.post_message(channel_id, message)\n except:\n message = \"Sorry, I forgot what I wanted to say (or the admins forgot to give me an intro message :wink:)\"\n\n slack_wrapper.post_message(channel_id, message)", "def _construct_message(self):\n self.message = {\"token\": self._auth, \"channel\": self.channel}\n super()._construct_message()", "def slack(message):\n slack_hook = 'https://hooks.slack.com/services/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'\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n requests.post(slack_hook, json.dumps({'text': message}), headers=headers)", "def generate_plain_mesg(info, open_quests, owner, tags):\n\n msg = (\n \"This email is being sent to {} because that is the owner listed\\n\"\n \"for the systems with open Hermes labors listed below.\\n\\n\"\n \"Due dates, if any, are noted with each quest.\\n\".format(owner)\n )\n msg += (\n \"\\nTo throw an event manually, you can run the following command \"\n \"on a shell server:\"\n \"\\n\\n\"\n \"$ hermes event create [event] --host [hostname].\\n\\n\"\n \"Or you can visit the quests linked 
below.\\n\\n\".format(\n settings.frontend)\n )\n for quest_id in info[owner]:\n quest = find_quest(open_quests, quest_id)\n if quest:\n msg += (\n \"==[ QUEST {} ]================================\\n\"\n \"CREATOR: {}\\n\"\n ).format(\n quest_id, quest.creator\n )\n if quest.target_time:\n msg += \"DUE: {}\\n\".format(quest.target_time)\n msg += \"DESC: \\\"{}\\\"\\n\".format(textwrap.fill(\n quest.description,\n width=60, subsequent_indent=\"\"\n ))\n msg += \"LINK: {}/v1/quests/{}\\n\\n\".format(\n settings.frontend, quest_id\n )\n else:\n msg += \" Labors not associated with a quest:\\n\\n\"\n\n msg += \"Machines with labors:\\n\"\n\n for hostname in sorted(info[owner][quest_id]):\n if tags[hostname]:\n tags_str = \"{}\".format((\", \".join(tags[hostname])))\n else:\n tags_str = \"no services\"\n msg += \" {} ({})\\n\".format(hostname, tags_str)\n\n msg += \"\\n\\n\"\n\n return msg", "def slackMessage(binState):\n log = logging.getLogger('iob')\n\n if binState:\n location = \"Out\"\n else:\n location = \"In\"\n \n url = \"https://hooks.slack.com/services/{}\"\n \n payload = {\"text\": \"Bin is: {}\".format(location)}\n\n headers = {\"Content-Type\": \"application/json\"}\n\n response = requests.request(\n \"POST\",\n url,\n data=json.dumps(payload),\n headers=headers\n )\n\n log.debug(response.text)\n return", "def send_code(subject, message):\n\n message = \"```\\n{}\\n```\".format(message)\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"*\" + subject + \"*\"\n }\n },\n {\"type\": \"divider\"},\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": message\n }\n },\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"\\n\\n\"\n }\n }\n ]\n\n Slacker._send_blocks(blocks)", "def comsume_msg(self, msg_type):", "def post_message(buddy_groups, channel):\n resp = \"Hi EVGR-mixers! :minion_wave: This week's random groups are as follows\"\n for buddy_group in buddy_groups:\n resp += \"\\n:coffee: \" + \" & \".join([f\"<@{buddy}>\" for buddy in buddy_group])\n resp += \"\\nFind some time this week to connect with your random buddies \"\n resp += \":blob_excited: :fireball:\"\n\n try:\n response = client.chat_postMessage(channel=channel, mrkdwn=True, text=resp)\n except SlackApiError as err:\n assert err.response[\"error\"]\n return response" ]
[ "0.6430378", "0.61646515", "0.61048555", "0.6087194", "0.60472983", "0.6007284", "0.6002899", "0.5978185", "0.5974844", "0.5949206", "0.59122825", "0.57997376", "0.57194704", "0.5707758", "0.56715614", "0.5656784", "0.5643831", "0.5628419", "0.56189126", "0.5614972", "0.55933994", "0.55807793", "0.5579447", "0.55701005", "0.55624056", "0.5562123", "0.5550662", "0.55410326", "0.55398476", "0.5538762" ]
0.6493337
0
Compile a message that can be posted to Slack after an SMS has been received
def compile_slack_sms_message(_sms_from, message): sms_from_user = _query_user(_sms_from) sms_from = _format_caller(sms_from_user, _sms_from) pretext = "Nytt SMS från %s" % (sms_from, ) fallback = "%s \n\"%s\"" % (pretext, message) return { 'attachments': [ { 'pretext': pretext, 'fallback': fallback, 'color': 'warning', 'text': message } ] }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile_slack_phone_message(phone_from, phone_to, status, location):\n\n call_from_user = _query_user(phone_from)\n call_from = _format_caller(call_from_user, phone_from)\n\n call_to_user = _query_user(phone_to)\n call_to = _format_caller(call_to_user, phone_to)\n\n location_str = list(filter(lambda x: x[0] == location, Located.LOCATION_CHOICES))\n\n if not location_str:\n logger.error('Unknown café choice: %d' % (location,))\n location_str = 'Okänt café'\n else:\n location_str = location_str[0][1]\n\n fallback = 'Ett samtal till %s från %s har %s.' % (\n location_str,\n call_from,\n ('blivit taget av %s' if status == 'success' else 'missats av %s') % (call_to,),\n )\n\n fields = [\n {\n 'title': 'Status',\n 'value': 'Taget' if status == 'success' else 'Missat',\n 'short': True\n },\n {\n 'title': 'Café',\n 'value': location_str,\n 'short': True\n },\n {\n 'title': 'Mottagare',\n 'value': call_to,\n 'short': False\n }\n ]\n\n if call_from_user is not None and call_from_user['groups']:\n groups = call_from_user['groups']\n\n groups_str = '%s %s tillhör %s: %s.' % (\n call_from_user['first_name'],\n call_from_user['last_name'],\n 'grupperna' if len(groups) > 1 else 'gruppen',\n ', '.join(groups)\n )\n\n fallback += '\\n\\n%s' % groups_str\n fields += [\n {\n 'title': 'Grupper',\n 'value': groups_str,\n 'short': False\n }\n ]\n\n return {\n 'attachments': [\n {\n 'pretext': 'Nytt samtal från %s' % call_from,\n 'fallback': fallback,\n 'color': 'good' if status == 'success' else 'danger',\n 'fields': fields\n }\n ]\n }", "def message(**payload):\n web_client = payload[\"web_client\"]\n\n # Getting information from the response\n data = payload[\"data\"]\n channel_id = data.get(\"channel\")\n text = data.get(\"text\")\n subtype = data.get(\"subtype\")\n ts = data['ts']\n user = data.get('username') if not data.get('user') else data.get('user')\n # Creating a Converstion object\n message = Message(ts, user, text)\n\n # Appending the converstion attributes to the logs\n conversation.append(message.toDict())\n\n if subtype == 'bot_message': return\n\n do_respond(web_client, channel_id, text)", "def sms_reply():\n # Fetch the message\n msg = request.form.get('Body')\n\n # Create reply\n resp = MessagingResponse()\n resp.message(\"You said: {} \\n *Kayra Dev* \".format(msg))\n \n account_sid = 'AC195cf76c0d725909794c30f9b0c32961' \n auth_token = '70531f5d14ec79c14254cf7fdfb40bad' \n client = Client(account_sid, auth_token) \n \n message = client.messages.create( \n from_='whatsapp:+14155238886', \n body=msg, \n to='whatsapp:+237696527034' \n ) \n \n print(message.sid)\n return str(resp)", "def do_something(incoming_msg):\n return \"i did what you said - {}\".format(incoming_msg.text)", "def generate_message(ctx, question, answer):\n return preamble.format(channel=rules_channel(ctx).id) + question + answer", "def send_code(subject, message):\n\n message = \"```\\n{}\\n```\".format(message)\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"*\" + subject + \"*\"\n }\n },\n {\"type\": \"divider\"},\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": message\n }\n },\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"\\n\\n\"\n }\n }\n ]\n\n Slacker._send_blocks(blocks)", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret 
+ \"\\r\\n\"", "def sms():\n\n\t# initialize response\n\tresp = MessagingResponse()\n\n\t# add a message\n\tresp.message(\"Thank you for your response! We are confirming your message.\")\n\n\treturn str(resp)", "def get_message():\n ## Get the body of the text\n body = request.values.get('Body', None)\n print('Full message: ',body)\n ## Get the number of the sms\n senderNumber = request.values.get('From',None)\n ## call the translate function with the body of the text and get the translated text\n message, number = extractMessage(body)\n print('message stripped: ',message)\n print('number is: ',number)\n translated = translate(message)\n print('translated: ',translated)\n sendText(number, translated + ' from ' + senderNumber)\n ## respond with the translated text\n ##resp = twilio.twiml.Response()\n ##resp.message('Your message has been sent')\n ##return str(resp)\n return('Hello')", "def sms_ahoy_reply():\n # Start our response\n resp = MessagingResponse()\n if request.method == 'POST':\n msg = request.form['Body']\n joke = re.search(r'(.*)joke(.*)', msg, re.I)\n greet = re.search(r'(.*)[hi|hey|hello](.*)', msg, re.I)\n quote = re.search(r'(.*)quote(.*)', msg, re.I)\n # joke = re.search(r'(.*)joke(.*)', msg, re.I)\n\n if joke: resp.message(\"I wanted to look for my watch but I couldn't find the time!\")\n elif quote: resp.message(\"A great player is the one who makes the game look easy!\")\n elif greet: resp.message(\"Greetings! I am your assistant!\")\n\n # Add a message\n else: resp.message(\"Ahoy! You said, '\" + msg + \"'\")\n print(request.form)\n\n else: resp.message(\"Greetings! I am your assistant!\") \n\n return str(resp)", "def send_sms(to, datas, temp_id):\n cpp = CCP()\n cpp.sendTemplateSMS(to, datas, temp_id)", "def horde_message(self, message):", "def post_msg(text):\n client = WebClient(token=os.environ[\"SLACK_BOT_TOKEN\"])\n client.chat_postMessage(\n channel=os.environ[\"SLACK_CHANNEL\"],\n text=\"News\",\n blocks=[\n {\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": (text)}}],\n )\n return text", "def slackMessage(sMessage):\n sChannel = '#' + getConfig('slack', 'channel')\n print(\"Posting slack message to %s: %s\" % (sChannel, sMessage))\n requests.post(getConfig('slack', 'url'), data=json.dumps({'text': sMessage,\n 'channel': sChannel,\n 'user': getConfig('slack', 'user'),\n 'icon_emoji': getConfig('slack', 'emoji')}))", "def sms_reply():\n # Start our TwiML response\n # if body.lower()==\"good\":\n message=\"Hi I'm IRIS, an Immediately Responsive Intelligent System\\nHow are you feeling today?\"\n user=request.form['Body']\n\n # message=\"Hi \"+ name+ \"\"\n # user=request.form['Body']\n\n if user==\"good\":\n message=\"Glad to hear it! I hope you continue to feel this way! Celebrate this feeling and hold onto what happened ot make you feel this way so that you can repeat it in the future!\"\n\n if user==\"sad\":\n message=\"I’m sorry to hear that. Here are some things I do to make me feel better: take a walk outside, listen to uplifting music, call or message a loved one, or watch or read something positive to take my mind off of what I’m feeling.\"\n\n if user==\"nervous\":\n message=\"It’s going to be ok! This feeling will not last forever.\"\n if user==\"lonely\":\n message=\"I’m here for you, and know that you are loved, supported, and important. The world would not be the same without you! For a loving quote respond\"\n\n if user==\"angry\":\n message=\"“Let me help you turn your anger into something positive. 
Here are some ways to burn off energy productively: take a long walk, remove yourself from the situation, paint of draw, listen to loud music, or take a break from what you are doing.\"\n\n if user==\"tired\":\n message=\"I understand what you are feeling well. I recommend taking a break to do an activity you enjoy, taking a nap, getting a coffee, doing 20 jumping jacks, listening to a pump-up playlist, or standing up to stretch for a bit.\"\n\n if user==\"average\":\n message=\"There are many things to look forward to!\"\n resp = MessagingResponse()\n\t # Add a message\n \n resp.message(message)\n\t # Add a picture message\n\t #msg.media(\"https://farm8.staticflickr.com/7090/6941316406_80b4d6d50e_z_d.jpg\")\n\n return str(resp)", "def incoming_sms():\n txt = request.form['Body']\n\n # remove leading and trailing white space and make lowercase\n txt = txt.strip()\n txt = txt.lower()\n\n # handle random searches differently than breed searches\n if txt == 'random' or txt == 'dog':\n url = get_dogs.get_random_dog()\n else:\n url = get_dogs.request_breed(txt)\n \n resp = MessagingResponse()\n if url:\n resp.message(url)\n else:\n resp.message(\"Sorry! We couldn't find a dog matching that query. Please try \\\n a more general search term.\")\n return str(resp)", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def create_message(user, conv_id, text, media_url, token):\n\n # method to call if user is part of the conversation\n def create_message(user, conv, response):\n user_alias = conv.get_alias_for_user(user)\n msg = ConvMessages.create(user, user_alias, conv, text, media_url)\n conv.put_message(msg)\n # send new msg to all users in this conv\n broadcast_message(msg, token)\n response['messages'] = msg.get_full_data()\n\n #Send the message to Firebase\n \n #postUrl = 'https://hailing-frequencies-2017.firebaseio.com/messages/' + conv_id + '/'\n #payload =\n #reply = requests.post(postUrl, data=payload)\n\n return response\n\n return process_apicall_checkconv_checkuser(user, conv_id, create_message)", "def c_message(text):\n string = text\n string = string.replace('_', ' ')\n return \"C {}\".format(string)", "def send_message_to_slack(text):\n\n try:\n post = {\n \"text\": \":fire: :sad_parrot: *SSL Certificate BACKUP SCRIPT Status for HTTPD Proxy:* :sad_parrot: :fire:\",\n \"attachments\": [\n {\n \"text\": \"{0}\".format(text),\n \"color\": \"#B22222\",\n \"attachment_type\": \"default\",\n \"fields\": [\n {\n \"title\": \"Priority\",\n \"value\": \"High\",\n \"short\": \"false\"\n }\n ],\n \"footer\": \"AWS HTTPD\",\n \"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\"\n }\n ]\n }\n\n ssm_param_name = 'slack_notification_webhook'\n ssm = boto3.client('ssm', config=CONFIG, region_name='eu-west-2')\n try:\n response = ssm.get_parameter(\n Name=ssm_param_name, WithDecryption=True)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ParameterNotFound':\n LOGGER.info(\n 'Slack SSM parameter %s not found. 
No notification sent', ssm_param_name)\n return\n else:\n logging.error(\n \"Unexpected error when attempting to get Slack webhook URL: %s\", e)\n return\n if 'Value' in response['Parameter']:\n url = response['Parameter']['Value']\n\n json_data = json.dumps(post)\n req = urllib.request.Request(\n url,\n data=json_data.encode('ascii'),\n headers={'Content-Type': 'application/json'})\n LOGGER.info('Sending notification to Slack')\n response = urllib.request.urlopen(req)\n\n else:\n LOGGER.info(\n 'Value for Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n\n except Exception as err:\n logging.error(\n 'The following error has occurred on line: %s',\n sys.exc_info()[2].tb_lineno)\n logging.error(str(err))", "def makemsg2write(innermsg, inputtext=''):\n nowtuple = time.time()\n nowdatetime = datetime.datetime.fromtimestamp(nowtuple)\n finnalmsg = {'fmId': math.floor(nowtuple),\n 'fmTime': nowdatetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'fmSend': True, 'fmSender': innermsg['fmSender'],\n 'fmType': 'Text',\n 'fmText': f\"{inputtext}\"\n }\n writefmmsg2txtandmaybeevernotetoo(finnalmsg)", "def send_sms(self, body):\n message = self.twilio_client.sms.messages.create(to=self.to_num, from_=self.from_num, body=body)", "def message_of(cfg, ticket, phase):\n return cfg[\"message_template\"] % (ticket, text(cfg, phase))", "def message_body_messenger(self) -> str:\n ...", "def tweet(msg):\r\n m = \"\\n{}\\n\".format(msg)\r\n arcpy.AddMessage(m)\r\n print(m)\r\n print(arcpy.GetMessages())", "def get_message(api):\n\n # List to hold the extracted message. Using a list rather than a string because a string is immutable.\n # Not good practice to continually modify an immutable\n encoded_msg_list = []\n\n # Go through each message in the timeline we're posting the tweets to\n for status in tweepy.Cursor(api.user_timeline, screen_name='@DailyDoseOfSad1', tweet_mode=\"extended\").items():\n if status.full_text[-6] == '#':\n encoded_msg_list.insert(0, status.full_text[\n -7]) # Prepend the last character (the sensitive message) into a list\n elif status.full_text[-7] == '#':\n encoded_msg_list.insert(0, status.full_text[\n -8]) # Prepend the last character (the sensitive message) into a list\n\n # List to hold binary message\n msg_list = []\n\n # Post process the string. A . is a 0 and a ! 
is a 1.\n for char in encoded_msg_list:\n if char == \".\":\n msg_list.append('0')\n elif char == '!':\n msg_list.append('1')\n\n # Join the list to an empty string once it's full, to avoid memory allocation to immutable string type\n return ''.join(msg_list)", "def send_invite_sms(profile, template_name, extra_context):\n c = {'profile': profile}\n c.update(extra_context or {})\n body = loader.render_to_string(template_name, c).strip()\n if len(body) <= 160:\n messages = [body.replace(\"\\n\", \" \")]\n else:\n messages = body.split(\"\\n\")\n for body in messages:\n profile.send_sms(body)", "def send_message(self):\n self.preprocess_text()\n message_text = self.create_message_text()\n \n telnyx.Message.create(\n from_=configs.source_number,\n to=self.destination_number,\n text=message_text,\n )", "def slack(message):\n slack_hook = 'https://hooks.slack.com/services/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'\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n requests.post(slack_hook, json.dumps({'text': message}), headers=headers)", "def build_message(cmd, data):\r\n\tif len(cmd) > CMD_FIELD_LENGTH or len(data) > MAX_DATA_LENGTH:\r\n\t\treturn None\r\n\tfull_cmd = cmd + \" \"*(CMD_FIELD_LENGTH-len(cmd))\r\n\tdata_len = str(len(data))\r\n\tfull_data_len = \"0\"*(LENGTH_FIELD_LENGTH-len(data_len))+data_len\r\n\tfull_msg = DELIMITER.join([full_cmd, full_data_len, data])\r\n\treturn full_msg" ]
[ "0.67178714", "0.6370651", "0.6323726", "0.63050747", "0.6260869", "0.62185776", "0.619205", "0.6175053", "0.6171278", "0.6103282", "0.6072457", "0.60609436", "0.60509944", "0.60441846", "0.60350686", "0.6020911", "0.6018892", "0.5999021", "0.5975836", "0.59454054", "0.5932026", "0.59264535", "0.5891138", "0.5886964", "0.588257", "0.5873873", "0.5866586", "0.5865875", "0.58607835", "0.58603495" ]
0.71974516
0
Retrieves first name, last name and groups corresponding to a phone number from the database, if it exists. If multiple users have the same number, none will be queried
def _query_user(phone): if not is_valid_phone_number(phone): return None try: user = Profile.objects.get(mobile_phone=_remove_area_code(phone)).user return { 'first_name': user.first_name, 'last_name': user.last_name, 'groups': [group.name if group.name[0] != '_' else group.name[1:] for group in user.groups.all()] } except (ObjectDoesNotExist, MultipleObjectsReturned): # Expected output for a lot of calls. Not an error. return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user(conn ,phone_number: str) -> Tuple[str, List[str], str]:\n with conn.cursor() as cur:\n\n # Get user info from db.\n cur.execute(\"SELECT * FROM users WHERE phone_number = %s\", (phone_number,))\n usr = cur.fetchone()\n if usr is None:\n return None\n return usr", "def get(self, phone_number: str):\r\n args = authParser.parse_args()\r\n\r\n first_three = phone_number[:3]\r\n\r\n if first_three not in prefix_list and first_three != \"+23\":\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Input in a valid phone-number\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n if len(phone_number) == 11 or len(phone_number) == 14:\r\n user = (UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first())\r\n\r\n if not user:\r\n response = {\r\n \"status\": \"error\",\r\n \"detials\": {\r\n \"message\": \"User with phone number doesnt exist\"\r\n }\r\n }\r\n return response, http.client.NOT_FOUND\r\n\r\n user = UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first()\r\n\r\n if not user:\r\n # The email doesnt exist\r\n return {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Not Found\"\r\n }\r\n }, http.client.OK\r\n user = admin_namespace.marshal(user, user_model)\r\n return {\r\n \"status\": \"success\",\r\n \"details\": {\r\n \"result\": user\r\n }\r\n }, http.client.OK", "def __ui_search_persons_by_phone_number(self):\n searched_phone_number = input(\"Introduce the phone number: \").strip().lower()\n if searched_phone_number == \"\":\n print(\"You cannot search persons by an empty phone number!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_phone_number(searched_phone_number)\n\n if len(searched_persons) == 0:\n print('There is no person whose phone number matches with \"{}\"!\\n'.format(searched_phone_number))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def ldap_get_number(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n number = result.get(\"mobile-phone\")[0]\n return number\n\n return None", "def get_groups(phone_num):\n phone_num = strip_phone_num(phone_num) # Get last 9 digits\n user = User.query.filter_by(phone_num=phone_num).first()\n\n if not user:\n return {\n \"success\": False,\n \"msg\": \"User does not exist.\"\n }\n\n payload = {}\n payload[\"groups\"] = []\n groups = user.groups\n for group in groups:\n group_dict = {\n \"id\": group.id,\n \"name\": group.name\n }\n\n members = [{\n \"nickname\": user.nickname,\n \"phone_num\": \"0\" + user.phone_num,\n \"balance\": None\n }]\n for assoc in user.group_associations:\n if assoc.group_id != group.id:\n continue\n\n associate = User.query.filter_by(id=assoc.associate_id).first()\n\n members.append({\n \"nickname\": assoc.associate_nickname,\n \"phone_num\": '0' + associate.phone_num,\n \"balance\": assoc.balance\n })\n\n group_dict[\"members\"] = members\n payload[\"groups\"].append(group_dict)\n\n return {\n \"success\": True,\n \"groups\" : payload[\"groups\"]\n }", "def harvest_by_phone(client, phone):\n try:\n entity = client(users.GetFullUserRequest(id=phone))\n except ValueError:\n return 'There is no account connected to this phone number'\n\n return harvest_user(client, entity)", "def phonenumber_in_db(self, phonenumber, users_list):\n return self.user_in_db(phonenumber, users_list, \"phonenumber\")", "def get_user_by_phone(self, phone):\n sql = 'select id ,first_name' \\\n ',last_name' \\\n 
',password' \\\n ',phone ' \\\n 'from account_user ' \\\n 'where phone = %s'\n user = User.objects.raw(sql, [phone])[0];\n return user", "def get_user_by_phone(phone_num):\n\n user = db.session.query(User).filter(phone_num == User.phone_num)\n return user\n \n # User.query.filter(User.phone_num == phone_num).one()", "def get_user_by_phone(phone_num):\n\n user = db.session.query(User).filter(phone_num == User.phone_num).first()\n return user\n \n # SELECT * FROM users WHERE phone_num == phone_num\n # User.query.filter(User.phone_num == phone_num).one()", "def search_by_phone_number(self, phone_number):\r\n if len(re.findall(\"[^0-9-+ ]+\", phone_number)) or len([c for c in phone_number if c == '+']) > 1:\r\n raise PersonPhoneNumberException(\"Invalid phone number search input. Can only contain digits, hyphens,\"\r\n \"spaces, and a plus sign(+).\")\r\n phone_number = phone_number.replace(' ', '')\r\n phone_number = phone_number.replace('-', '')\r\n phone_number = phone_number.replace('+4', '')\r\n return self.__filter(self.get_all_persons(), lambda x: phone_number in x.phone_number.replace(' ', ''))", "def get_user_or_placeholder(phone_num, nickname):\n if not SWE_PHONENUM_RE.match(phone_num):\n return {\n \"success\": False,\n \"msg\": \"Swedish format is required for phone number.\"\n }\n\n if not NICKNAME_RE.match(nickname):\n return {\n \"success\": False,\n \"msg\": \"Nicknames need to be 2-30 characters long and can only contain letters, numbers, spaces, dashes and underscores.\"\n }\n\n phone_num = strip_phone_num(phone_num) # Get last 9 digits\n user = User.query.filter_by(phone_num=phone_num).first()\n if not user:\n # Create placeholder until a user registers with associate_phone\n user = User(phone_num=phone_num, active=False)\n if not NICKNAME_RE.match(nickname):\n return {\n \"success\": False,\n \"msg\": \"Not a valid nickname.\"\n }\n user.nickname = nickname\n\n db.session.add(user)\n db.session.commit()\n\n return {\n \"success\": True,\n \"user\": user\n }", "def get_groups(phone_num):\n\n phone_num = json.loads(phone_num)\n resp = con.get_groups(phone_num)\n\n emit(\"groups_update\", json.dumps(resp))", "def readrecord(phones,username,phonenum):\r\n if username in phones:\r\n raise ValueError(username+ \":\"+phones[username])\r\n else:\r\n raise ValueError(\"This username are not exist\")", "async def get_by_phones(\n self,\n\t\tphones: Optional[List[str]] = None,\n\t\tfields: Optional[List[UsersFields]] = None,\n\t\t**kwargs\n ) -> friends.GetByPhonesResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.getByPhones\", params)\n model = friends.GetByPhonesResponse\n return model(**response).response", "def get_phone_number(user_id):\n try:\n student = _UserProfile.objects.get(user_id=user_id)\n except _UserProfile.DoesNotExist as exception:\n log.exception(exception)\n return None\n return student.phone_number or None", "def find_partner_from_phone_number(self, cr, uid, phone_number, context=None):\n _logger.debug('Phone number: %s' % phone_number)\n if context is None:\n context = self.pool.get('res.users').context_get(cr, uid, context=context)\n\n search_args = [\n '|',\n ('phone', '=', phone_number),\n ('mobile', '=', phone_number),\n ]\n address_obj = self.pool.get('res.partner.address')\n address_ids = address_obj.search(cr, uid, search_args, context=context)\n if not address_ids:\n return False, False\n\n address_id = address_ids[0]\n partner_id = address_obj.browse(cr, uid, address_id, context=context).partner_id\n 
partner_id = partner_id and partner_id.id or False\n\n return partner_id, address_id", "def getByName(database,firstname):\n correspondant=[]\n for key,usr in database.items():\n if firstname == usr.firstname:\n correspondant.append(usr)\n if len(correspondant)==0:\n print(f\"there is no user named {firstname}\")\n return 0, False\n if len(correspondant)>1:\n print(f\"there are many users named {firstname}\")\n lastname=input(\"Whar is his last name\")\n for usr in correspondant:\n if usr.lastname==lastname:\n return usr,True\n else:\n return correspondant[0],True", "def lookup_phone_number(phone):\n \n #create Twilio client\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n try:\n\n #check if number is real number using Twilio lookup\n phone_number = client.lookups \\\n .phone_numbers(phone) \\\n .fetch(type=['carrier'])\n\n #returns formmatted phone number\n return phone_number.phone_number\n\n #checks Twilio exception responses if number not real\n except TwilioRestException as e:\n\n #Number not found - return False\n if e.code == 20404:\n\n return False\n\n else:\n\n raise e", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def searchForPhone(phoneName, attributes=['uuid']):\n returnedTagsForApi = {}\n for attr in attributes:\n returnedTagsForApi[attr] = True\n phoneJson = client.service\\\n .listPhone(searchCriteria={'name': '%s%s' % ('CSF', phoneName)}, returnedTags=returnedTagsForApi)\n if (not phoneJson['return'] or not phoneJson['return']['phone']):\n return None\n if (len(phoneJson['return']['phone']) > 1):\n raise RuntimeError('found more then one phone with the same name, the name => ' + phoneName)\n return phoneJson['return']['phone'][0]", "def get(self, phone):\n\n #args = argParser()\n #phone = args.parse_args().get(\"fromPhone\")\n\n if not UserExist(phone):\n return jsonify(generateReturnDictionary(301, \"Sorry, Mobile Wallet Account does not exists!, create an account.\", \"FAILURE\"))\n\n try:\n retJson = mongo.db.Register.find({\n \"Phone\": phone\n }, {\n \"Password\":0, # projection\n \"_id\":0,\n \"FirstName\":0,\n \"LastName\":0,\n \"Email\":0,\n \"Phone\":0,\n \"Network\":0,\n \"Username\":0,\n \"Password\":0,\n \"Debt\":0,\n \"DateTimeCreated\":0,\n \"apiKeys\":0\n })[0]\n return make_response(jsonify(retJson), 200)\n except Exception as e:\n retJson = {\n \"code\": 409,\n \"message\": \"There was an error while trying to check your wallect balance -> , try again!\",\n \"status\": \"FAILURE: {0}\".format(e.message)\n }\n return jsonify(retJson)", "def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)", "def validate_phone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(phone=value).exists():\n raise serializers.ValidationError('phone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def validate_phone_number(self, phone_number):\n if User.objects.filter(phone_number=phone_number).exists():\n raise serializers.ValidationError('Phone Number 
already registered.')\n return phone_number", "def find_entry(key):\n found_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for k in db:\n name = str(k).lower()\n phone = str(db[k])\n if (name.find(key.lower())) >= 0 or (phone.find(key.lower()) >= 0):\n person = Person()\n person.name = k\n person.phone = db[k]\n found_list.append(person)\n display_list(found_list)\n db.close()", "def ldap_get_firstname(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n firstname = result.get(\"first-name\")[0]\n return firstname\n\n return None", "def is_valid_user_by_phone_number(phone_number):\n count = db.users.filter(and_(db.users.phone_number == phone_number, db.users.is_validate == True))\n if count == 1:\n return True", "def check_record(d):\n\n print(\"\\nEnter the name of the person you'd like to check\")\n firstname = input('First name: ')\n lastname = input('Last name: ')\n\n for pid in d:\n if firstname == d[pid].get('First name') and lastname == d[pid].get('Last name'):\n print('\\n# The contact is already in the phone book')\n return d\n print('\\n# The contact is not in the phone book')", "def get_ldap_contact(ldap_conn, base_dn, employee_number, unique_id, attrs, cache):\r\n search_filter = '{0}={1}'.format(unique_id, employee_number)\r\n results = ldap_conn.search_s(base_dn, ldap.SCOPE_SUBTREE, search_filter, attrs)\r\n contact_found = {}\r\n if results:\r\n attrs_found = results[0][1]\r\n # cache the dn for the employee_number\r\n cache[employee_number] = results[0][0]\r\n for key in attrs:\r\n if key in attrs_found:\r\n contact_found[key] = attrs_found[key][0]\r\n else:\r\n contact_found[key] = False\r\n else:\r\n logging.warning('Cannot found employee in ldap ' + employee_number)\r\n return contact_found" ]
[ "0.6712627", "0.6684366", "0.6540897", "0.6530467", "0.64618504", "0.64562565", "0.6306256", "0.62955487", "0.6291792", "0.62606305", "0.6074895", "0.6006919", "0.5898004", "0.57387257", "0.5648351", "0.5647391", "0.5595597", "0.5593799", "0.55526763", "0.549593", "0.5446735", "0.54459864", "0.5427164", "0.54091495", "0.5395205", "0.53938526", "0.5348805", "0.5332115", "0.52708197", "0.52685547" ]
0.75981003
0
Formats caller information into a readable string
def _format_caller(call_user, phone): # The phone number is private or not provided if not phone: return 'dolt nummer' if is_valid_phone_number(phone): # Set the phone number as a clickable link caller = '<tel:%s|%s>' % (phone, phone) else: caller = phone if call_user is not None: caller = '%s %s (%s)' % ( call_user['first_name'], call_user['last_name'], caller ) return caller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])", "def format_call(func, args, kwargs, object_name=\"Memory\"):\r\n path, signature = format_signature(func, *args, **kwargs)\r\n msg = '%s\\n[%s] Calling %s...\\n%s' % (80 * '_', object_name,\r\n path, signature)\r\n return msg\r\n # XXX: Not using logging framework\r\n #self.debug(msg)\r", "def debug_caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s", "def caller_name(self, skip=6):\r\n stack = inspect.stack()\r\n start = 0 + skip\r\n if len(stack) < start + 1:\r\n return ''\r\n parentframe = stack[start][0] \r\n\r\n name = []\r\n module = inspect.getmodule(parentframe)\r\n # `modname` can be None when frame is executed directly in console\r\n # TODO(techtonik): consider using __main__\r\n if module:\r\n name.append(module.__name__)\r\n # detect classname\r\n if 'self' in parentframe.f_locals:\r\n # I don't know any way to detect call from the object method\r\n # XXX: there seems to be no way to detect static method call - it will\r\n # be just a function call\r\n name.append(parentframe.f_locals['self'].__class__.__name__)\r\n codename = parentframe.f_code.co_name\r\n if codename != '<module>': # top level usually\r\n name.append( codename ) # function or a method\r\n\r\n ## Avoid circular refs and frame leaks\r\n # https://docs.python.org/2.7/library/inspect.html#the-interpreter-stack\r\n del parentframe, stack\r\n\r\n return \".\".join(name)", "def call_spec_string():\n # pylint: disable=protected-access\n frame = sys._getframe(1)\n argvals = inspect.getargvalues(frame)\n if argvals.args[0] == 'self':\n return inspect.formatargvalues(argvals.args[1:], *argvals[1:])\n else:\n return inspect.formatargvalues(*argvals)", "def caller_reference(self) -> str:\n return pulumi.get(self, \"caller_reference\")", "def callersName():\r\n import sys\r\n return sys._getframe(2).f_code.co_name", "def format(self) -> str:", "def getCallerName(self,frameLevel=1):\n self.getCallerParams(frameLevel)\n result=self.callerName\n return result", "def get_caller_name(depth=2, mod=True, cls=False, mth=False):\n stack = inspect.stack()\n start = 0 + depth\n if len(stack) < start + 1:\n return ''\n parent_frame = stack[start][0]\n name = []\n module = inspect.getmodule(parent_frame)\n if module and mod:\n 
name.append(module.__name__)\n if cls and 'self' in parent_frame.f_locals:\n name.append(parent_frame.f_locals['self'].__class__.__name__)\n if mth:\n codename = parent_frame.f_code.co_name\n if codename != '<module>':\n name.append(codename)\n del parent_frame, stack\n return '.'.join(name)", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0]\n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def caller_name(skip=2):\n stack = inspect.stack()\n start = 0 + skip\n if len(stack) < start + 1:\n return ''\n parentframe = stack[start][0] \n \n name = []\n module = inspect.getmodule(parentframe)\n # `modname` can be None when frame is executed directly in console\n # TODO(techtonik): consider using __main__\n if module:\n name.append(module.__name__)\n # detect classname\n if 'self' in parentframe.f_locals:\n # I don't know any way to detect call from the object method\n # XXX: there seems to be no way to detect static method call - it will\n # be just a function call\n name.append(parentframe.f_locals['self'].__class__.__name__)\n codename = parentframe.f_code.co_name\n if codename != '<module>': # top level usually\n name.append( codename ) # function or a method\n del parentframe\n return \".\".join(name)", "def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n return caller", "def _format_call(value: ast3.Call, context: types.Context) -> typing.Text:\n\ttry:\n\t\treturn _format_call_horizontal(value, context)\n\texcept errors.NotPossible:\n\t\treturn _format_call_vertical(value, context)", "def calling_stack_info(print_res=True, code_context=1):\n\n start_frame = inspect.currentframe().f_back\n\n fil = generate_frame_list_info(start_frame, code_context=code_context)\n\n if print_res:\n # noinspection PyUnresolvedReferences\n print(fil.tb_txt)\n return fil", "def info(self):\n return f\"{self.get_first_name}, {self.get_last_name}. {self.get_age} y.o. 
#{self.get_id_number}\"", "def format_stack_entry(self, frame_lineno, lprefix=': '):\n import linecache, reprlib\n frame, lineno = frame_lineno\n filename = self.canonic(frame.f_code.co_filename)\n s = '%s(%r)' % (filename, lineno)\n if frame.f_code.co_name:\n s += frame.f_code.co_name\n else:\n s += \"<lambda>\"\n s += '()'\n if '__return__' in frame.f_locals:\n rv = frame.f_locals['__return__']\n s += '->'\n s += reprlib.repr(rv)\n line = linecache.getline(filename, lineno, frame.f_globals)\n if line:\n s += lprefix + line.strip()\n return s", "def logger_format(self) -> str:\n\t\treturn ('%(asctime) -19s | %(levelname) -8s | %(threadName) -10s | '\n\t\t\t\t'%(funcName) -16s | %(message)s')", "def who_is_calling():\n return sys._getframe(2).f_code.co_name", "def get_contact_info(self):\n return f\"Contact {self} at {self.email}\"", "def format_call(self, from_addr, to_addr, uuid):\n return self.template.format(\n from_addr=from_addr, to_addr=to_addr, uuid=uuid)", "def format_stack_trace(exc_info):\n if exc_info[0] is None:\n return ''\n lines = traceback.format_exception(*exc_info)\n return ''.join(line for line in lines)", "def caller_info(self):\n\n frames = traceback.extract_stack()\n frames.reverse()\n try:\n (_, mod_name) = __name__.rsplit('.', 1)\n except ValueError:\n mod_name = __name__\n for (fpath, lnum, _, _) in frames:\n (fname, _) = os.path.basename(fpath).rsplit('.', 1)\n if fname != mod_name:\n break\n\n return (fname, lnum)", "def get_caller_name(*, caller_file: str) -> str:\n\n file_path, file_name_with_ext = os.path.split(caller_file)\n file_name, file_ext = os.path.splitext(file_name_with_ext)\n return file_name", "def __get_caller_name(caller_frame):\n\n caller_name = caller_frame.f_code.co_name\n if 'self' in caller_frame.f_locals:\n caller_name = \"%s.%s\" % (\n caller_frame.f_locals['self'].__class__.__name__, caller_name\n )\n module = inspect.getmodule(caller_frame)\n if module:\n caller_name = \"%s.%s\" % (module.__name__, caller_name)\n return caller_name", "def printHeader(self,debugLevel=0):\n if self.headerLogger.level==logging.DEBUG:\n self.getCallerParams()\n self.headerLogger.debug('Calling method %s with arguments %s'%(self.callerName,self.callerLocals))\n if ((debugLevel==0) or \\\n (self.callerName in self.settings.debugAllowed) or ('all' in self.settings.debugAllowed)) \\\n and ((debugLevel in self.settings.config.getList(\"log\", \"debugAllowedLevels\") )) \\\n and (self.callerName not in self.settings.config.getList(\"log\", \"debugRestricted\")):\n print 'Calling method %s with arguments %s'%(self.callerName,self.callerLocals)\n #else hiddenMessagesLog.append(message) # Dropped in 0.24 because of loggers.", "def get_call_string(self) -> Optional[str]: # noqa\n call_repr = get_call_string(self.func_name, self.args, self.kwargs, max_length=75)\n return call_repr", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def format(self, *args, **kwargs) -> String:\n pass" ]
[ "0.7178175", "0.6633771", "0.6473987", "0.6436923", "0.63126194", "0.6247994", "0.616689", "0.61521333", "0.6140672", "0.6119034", "0.60761124", "0.6044592", "0.60336864", "0.6026234", "0.5985547", "0.5975166", "0.594615", "0.5940507", "0.59292597", "0.59132135", "0.5875647", "0.5860099", "0.58537674", "0.58505625", "0.5841073", "0.58391505", "0.5812968", "0.58114547", "0.57969093", "0.57911104" ]
0.739633
0
Removes the area code (+46) from the given phone number and replaces it with 0
def _remove_area_code(phone): if not phone.startswith('+46'): return phone else: return '0' + phone[3:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = \"+86-10-\" + new_number[-8:]\n\treturn new_number", "def phone(raw_phone):\n\n phone = raw_phone.replace('+33', '0')\n phone = '{} {} {} {} {}'.format(\n phone[0:2],\n phone[2:4],\n phone[4:6],\n phone[6:8],\n phone[8:10])\n return phone", "def grab_area_code(phone_number):\r\n #number of form +1 XXX XXX XXXX (this should be the form get_twilio_client provides)\r\n if \"+1\" == phone_number[:2]:\r\n return phone_number[2:5]\r\n # number of form 1 XXX XXX XXXX\r\n if len(phone_number) == 11 and phone_number[0] == '1':\r\n return phone_number[1:4]\r\n # number of form XXX XXX XXXX\r\n if len(phone_number) == 10:\r\n return phone_number[:3]\r\n raise BadPhoneNumberError('\"%s\" is an invalid phone number.' % phone_number)", "def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"", "def strip_phone_prefix(self, phone_num):\n # FIXME more accurate check\n if phone_num.startswith('+86'):\n return phone_num.replace('+86', '')\n if len(phone_num) != 11:\n return None\n return phone_num", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def clean_phone(self):\n phone = self.cleaned_data['phone']\n if phone.startswith('8') and len(phone) > 7:\n return phone.replace('8', '+7', 1)\n\n return phone", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def test_address__normalize_phone_number__7():\n assert '+421234007891' == normalize_phone_number(\n '0042-1234/0078-91', '+49')", "def telephone(value, arg=None):\n \n # Normalise a number\n value = value.replace(\" \", \"\").replace(\"-\", \"\")\n if value.startswith(\"0\"):\n value = \"+44\" + value[1:]\n normalised = value\n \n # Check if it's a number which is formatted in a special way\n if normalised in UNUSUAL_NUMBERS:\n value = UNUSUAL_NUMBERS[normalised]\n else:\n # Figure out how to format that number\n \n # Convert UK numbers into national format\n if value.startswith(\"+44\"):\n value = \"0\" + value[3:]\n \n # Now apply rules on how to split up area codes\n if value[:8] in ('01332050', '01382006'):\n # Direct dial only\n value = value[:5] + \" \" + value[5:]\n elif value[:7] in ('0141005', '0117101') or value[:6] in ('011800',):\n # Direct dial only\n value = value[:4] + \" \" + value[4:7] + \" \" + value[7:]\n elif value[:7] in ('0200003',):\n # Direct dial only\n value = value[:3] + \" \" + value[3:7] + \" \" + value[7:]\n elif value.startswith('01'):\n if value[2] == '1' or value[3] == '1':\n # 4 digit area codes\n area_code = value[:4]\n local_part = value[4:7] + \" \" + value[7:]\n elif value[:6] in (\n '013873', # Langholm\n '015242', # Hornby\n '015394', # Hawkshead\n '015395', # Grange-over-Sands\n '015396', # Sedbergh\n '016973', # Wigton\n '016974', # Raughton Head\n '016977', # Brampton\n '017683', # Appleby\n '017684', # Pooley Bridge\n '017687', # Keswick\n '019467', # Gosforth\n ):\n # 6 digit area 
codes\n area_code = value[:4] + \" \" + value[4:6]\n local_part = value[6:]\n else:\n # 5 digit\n area_code = value[:5]\n local_part = value[5:]\n \n value = \"(%s) %s\" % (area_code, local_part)\n \n elif value.startswith('02'):\n # 3 digit area codes\n value = \"(%s) %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('0500') or value.startswith('0800'):\n # direct dial - 4 digit prefix, short following\n value = \"%s %s\" % (value[:4], value[4:])\n \n elif value.startswith('03') or value.startswith('08') or value.startswith('09'):\n # direct dial - 4 digit prefix\n value = \"%s %s %s\" % (value[:4], value[4:7], value[7:])\n \n elif value.startswith('05') or value.startswith('070'):\n # direct dial - 3 digit prefix\n value = \"%s %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('07'):\n # direct dial - 5 digit prefix, short following\n value = \"%s %s\" % (value[:5], value[5:])\n\n # Now apply University rules:\n if value[:10] in ('(01865) 27', '(01865) 28', '(01865) 43', '(01865) 61'):\n # Oxford - list of internal number prefixes here:\n # http://www.oucs.ox.ac.uk/telecom/directories/intdiraccess.xml\n value = \"(01865 \" + value[8] + \")\" + value[9:]\n\n if arg == 'nolink':\n return value\n else:\n return mark_safe('<a href=\"tel:%s\">%s</a>' % (normalised, value))", "def normalize(phone):\n d = re.sub('\\D', '', phone)\n return '+7 (%s) %s-%s-%s' % (d[1:4], d[4:7], d[7:9], d[9:11])", "def test_address__normalize_phone_number__6():\n assert '+421234567891' == normalize_phone_number(\n '0042-1234/5678-91', '+49')", "def test_address__normalize_phone_number__4():\n assert '+491234507090' == normalize_phone_number('01234/5070-90', '+49')", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def test_address__normalize_phone_number__2():\n assert '+491234567890' == normalize_phone_number(\n '+49 (1234) 5678 - 90X', '+49')", "def format_and_validate_phonenumber(number):\n \n if number.startswith('+'):\n number = number.replace('+', '00', 1)\n \n regex = re.compile('(\\/|\\+|-| )')\n number = regex.sub('', number)\n \n if number.startswith(COUNTRY_CODE_PHONE):\n number = number.replace(COUNTRY_CODE_PHONE, '0', 1)\n\n # if the conversion to int does not fail then\n # there are only numbers included in the string\n try:\n int(number)\n except ValueError:\n raise ValidationError(_('Please enter numbers only.'))\n \n if number.startswith(START_MOBILE_PHONE):\n return number\n else:\n raise ValidationError(_('Please enter a cell phone number.'))", "def clean_phone(self):\n data = self.cleaned_data['phone']\n data = data.strip(' +').replace('-', '')\n if len(data) == 12:\n data = data[3:]\n\n return data", "def clean_incident_zip(zipcode):\n zipcode = str(zipcode).replace('.0', '')[:5]\n try:\n zipcode = int(zipcode)\n except:\n return None\n # Pad it on the left with '0's\n zipcode = '{:05}'.format(zipcode)\n return zipcode", "def test_address__normalize_phone_number__3():\n assert '+491234567891' == normalize_phone_number('01234/5678-91', '+49')", "def formatPostalCode(string):\n if string.isdigit():\n return int(string)\n else :\n return 0", "def fix_crappy_phone_number_formatting(phone_number):\n m = re.match(r'(\\d)?.?(\\d{3})\\D*(\\d{3})\\D*(\\d{4})\\D*(\\d*)$', phone_number)\n if m:\n fixed_number = f'+{m.group(1) or \"1\"}({m.group(2)}){m.group(3)}-{m.group(4)} {\"x\"+m.group(5) if m.group(5) else \"\"}'\n return fixed_number", "def clean_phone(number_str):\n number_str = number_str or ''\n number_str = 
number_str.replace('(', '').replace(')', '')\n number_str = number_str.replace('ext. ', 'x').replace('ext ', 'x')\n number_str = number_str.split(',')[0].strip()\n\n if number_str:\n return number_str", "def update_phone(phone, phone_mapping):\n results = []\n for iphone in re.split(',|;',phone):\n patterns = phone_pattern_re.search(iphone)\n if patterns:\n numbers = patterns.groups()\n if numbers[0] == \"852\":\n results.append(re.compile(r'\\D?(\\d{0,4}?)\\D{0,2}(\\d{4})\\D?(\\d{4})$', iphone))\n elif numbers[0] in phone_mapping:\n results.append (\"+852\"+ \" \" + numbers[1] + numbers[2])\n return ';'.join(results)", "def area_code(self):\n return self.number[:3]", "def replace_street(street):\r\n if isinstance(street, str):\r\n for rep in replacements:\r\n street = re.sub(rep, \"\", street)\r\n\r\n streetint = re.findall(r'\\d+', str(street))\r\n if len(streetint) > 0 and int(streetint[0]) < 100:\r\n street = int(streetint[0])\r\n\r\n if street < 10:\r\n street = '0' + str(street) + str(streetnums[str(street)])\r\n elif street < 14:\r\n street = str(street) + 'TH'\r\n else:\r\n street = str(street) + str(streetnums[str(street)[-1]])\r\n\r\n\r\n return street", "def parse_phone(phone):\n if isinstance(phone, int):\n return str(phone)\n else:\n phone = re.sub(r'[+()\\s-]', '', str(phone))\n if phone.isdigit():\n return phone", "def evalute_number(dialed):\n if (len(dialed) == 11 or len(dialed) == 10) and str(dialed).startswith(\"0\"):\n # UK Number\n return \"+44%s\" % (dialed[1:])\n elif len(dialed) == 6:\n # Local Fishguard numbers\n return \"+441348%s\" % (dialed)\n return None", "def clean(number):\n digits = [c for c in number if c.isdigit()]\n if len(digits) == 11 and digits[0] == \"1\":\n return ''.join(digits[1:])\n elif len(digits) != 10:\n return \"0000000000\"\n else:\n return ''.join(digits)", "def test_address__normalize_phone_number__1():\n assert '+491234567890' == normalize_phone_number('+491234567890', '+49')" ]
[ "0.7460669", "0.72332627", "0.69025934", "0.6782024", "0.67445296", "0.65414447", "0.6436624", "0.64058983", "0.63791174", "0.6344951", "0.6307682", "0.62712353", "0.62262017", "0.6223244", "0.61936754", "0.618883", "0.61685634", "0.61436635", "0.61339825", "0.6082766", "0.60424566", "0.60211927", "0.6015335", "0.5971727", "0.5950815", "0.59424114", "0.59124297", "0.5896133", "0.5879692", "0.5863971" ]
0.87382734
0
when the regtype changes, its name, the player, the editor, and the file path and the url must also be updated (by fetching the item)
def type(self, value): self._type_id = value data = RegType(value) self.regtype = data.typenaam ## self.player = data.playernaam ## self.editor = data.readernaam self.pad = os.path.join(data.padnaam, self._file) self.url = '/'.join((data.htmlpadnaam, self._file))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_ur_choose_ok_btn_clicked(self):\n ur_type = self.ur_choose_box.currentText()\n self.ur.set_UR_ROBOT(ur_type)\n self.set_ur_info_txt(\"set UR type: \" + ur_type )", "def add_player(self):\n title = \"Bienvenue dans le gestionnaire de tournois d'échec.\\nAjout d'un joueur\"\n subtitle = \"Saisir dans l'ordre :\\n\"\n\n menu = {1: ('', \"Nom du joueur\"),\n 2: ('', \"Prénom du joueur\"),\n 3: ('', \"Date de naissance (Format dd/mm/aaaa)\"),\n 4: ('', \"Sexe (H/F)\")}\n\n self.view_menu.display_menu(title=title, subtitle=subtitle, question=menu)\n\n choice = ('name', 'first_name', 'dob', '_genre')\n response = []\n\n for i in range(4):\n if 0 <= i <= 1: # pour les question nom et prénom\n\n valid = self.ask_and_store_text(menu[i + 1][1] + ' : ')\n while not valid[0]:\n valid = self.ask_and_store_text(menu[i + 1][1] + ' : ')\n response.append(valid[1])\n\n elif i == 2: # pour la date de naissance\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n while not self._control_user_input(\"dob\", valid):\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n response.append(valid)\n\n elif i == 3: # pour la saisie du genre\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n while not self._control_user_input(\"_genre\", valid):\n valid = self.view_menu.input(menu[i + 1][1] + ' : ')\n response.append(valid)\n\n res = dict(zip(choice, response))\n Player(**res)\n Player.save_all_players()\n self.menu_players()", "def __str__(self):\n return \"CraftWitch\"", "def handle_player_info():\n global information_lst\n\n name = request.form.get('nickname_url')\n name = f\"'{request.form.get('nickname_url')}'\" \n\n print(name)\n if \"/\" not in name[-3:]: \n name = name[:-1] + \"/'\"\n\n print(name)\n\n player_info = start(name, \"mirage\")\n information_lst.append(player_info)\n if len(information_lst) == 1:\n return render_template(\"index.html\",your_name = information_lst[0][0], source_img = f\"{information_lst[0][1]}.jpg\",\n kd = information_lst[0][3], win_rate = information_lst[0][2], adr = information_lst[0][4],\n headshot = information_lst[0][8], accur = information_lst[0][7][0][0],\n accur2 = information_lst[0][7][1][0], accur3 = information_lst[0][7][2][0],\n weapon_img10 = f\"{information_lst[0][7][0][1]}.png\", weapon_img11 = f\"{information_lst[0][7][1][1]}.png\",\n weapon_img12 = f\"{information_lst[0][7][2][1]}.png\")\n\n if len(information_lst) == 2:\n return render_template(\"index.html\", your_name = information_lst[0][0], source_img = f\"{information_lst[0][1]}.jpg\",\n kd = information_lst[0][3], win_rate = information_lst[0][2], adr = information_lst[0][4],\n headshot = information_lst[0][8], accur = information_lst[0][7][0][0],\n accur2 = information_lst[0][7][1][0], accur3 = information_lst[0][7][2][0],\n your_name2 = information_lst[1][0], source_img2 = f\"{information_lst[1][1]}.jpg\",\n kd2 = information_lst[1][3], win_rate2 = information_lst[1][2], adr2 = information_lst[1][4],\n headshot2 = information_lst[1][8], accur20 = information_lst[1][7][0][0],\n accur21 = information_lst[1][7][1][0], accur22 = information_lst[1][7][2][0],\n weapon_img10 = f\"{information_lst[0][7][0][1]}.png\", weapon_img11 = f\"{information_lst[0][7][1][1]}.png\",\n weapon_img12 = f\"{information_lst[0][7][2][1]}.png\", weapon_img20 = f\"{information_lst[1][7][0][1]}.png\",\n weapon_img21 = f\"{information_lst[1][7][1][1]}.png\", weapon_img22 = f\"{information_lst[1][7][2][1]}.png\")\n if len(information_lst) == 3:\n return render_template(\"index.html\", your_name = 
information_lst[0][0], source_img = f\"{information_lst[0][1]}.jpg\",\n kd = information_lst[0][3], win_rate = information_lst[0][2], adr = information_lst[0][4],\n headshot = information_lst[0][8], accur = information_lst[0][7][0][0],\n accur2 = information_lst[0][7][1][0], accur3 = information_lst[0][7][2][0],\n your_name2 = information_lst[1][0], source_img2 = f\"{information_lst[1][1]}.jpg\",\n kd2 = information_lst[1][3], win_rate2 = information_lst[1][2], adr2 = information_lst[1][4],\n headshot2 = information_lst[1][8], accur20 = information_lst[1][7][0][0],\n accur21 = information_lst[1][7][1][0], accur22 = information_lst[1][7][2][0],\n your_name3 = information_lst[2][0], source_img3 = f\"{information_lst[2][1]}.jpg\",\n kd3 = information_lst[2][3], win_rate3 = information_lst[2][2], adr3 = information_lst[2][4],\n headshot3 = information_lst[2][8], accur30 = information_lst[2][7][0][0],\n accur31 = information_lst[2][7][1][0], accur32 = information_lst[2][7][2][0],\n weapon_img10 = f\"{information_lst[0][7][0][1]}.png\", weapon_img11 = f\"{information_lst[0][7][1][1]}.png\",\n weapon_img12 = f\"{information_lst[0][7][2][1]}.png\", weapon_img20 = f\"{information_lst[1][7][0][1]}.png\",\n weapon_img21 = f\"{information_lst[1][7][1][1]}.png\", weapon_img22 = f\"{information_lst[1][7][2][1]}.png\",\n weapon_img30 = f\"{information_lst[2][7][0][1]}.png\", weapon_img31 = f\"{information_lst[2][7][1][1]}.png\",\n weapon_img32 = f\"{information_lst[2][7][2][1]}.png\")\n if len(information_lst) == 4:\n return render_template(\"index.html\", your_name = information_lst[0][0], source_img = f\"{information_lst[0][1]}.jpg\",\n kd = information_lst[0][3], win_rate = information_lst[0][2], adr = information_lst[0][4],\n headshot = information_lst[0][8], accur = information_lst[0][7][0][0],\n accur2 = information_lst[0][7][1][0], accur3 = information_lst[0][7][2][0],\n your_name2 = information_lst[1][0], source_img2 = f\"{information_lst[1][1]}.jpg\",\n kd2 = information_lst[1][3], win_rate2 = information_lst[1][2], adr2 = information_lst[1][4],\n headshot2 = information_lst[1][8], accur20 = information_lst[1][7][0][0],\n accur21 = information_lst[1][7][1][0], accur22 = information_lst[1][7][2][0],\n your_name3 = information_lst[2][0], source_img3 = f\"{information_lst[2][1]}.jpg\",\n kd3 = information_lst[2][3], win_rate3 = information_lst[2][2], adr3 = information_lst[2][4],\n headshot3 = information_lst[2][8], accur30 = information_lst[2][7][0][0],\n accur31 = information_lst[2][7][1][0], accur32 = information_lst[2][7][2][0],\n your_name4 = information_lst[3][0], source_img4 = f\"{information_lst[3][1]}.jpg\",\n kd4 = information_lst[3][3], win_rate4 = information_lst[3][2], adr4 = information_lst[3][4],\n headshot4 = information_lst[3][8], accur40 = information_lst[3][7][0][0],\n accur41 = information_lst[2][7][1][0], accur42 = information_lst[3][7][2][0],\n weapon_img10 = f\"{information_lst[0][7][0][1]}.png\", weapon_img11 = f\"{information_lst[0][7][1][1]}.png\",\n weapon_img12 = f\"{information_lst[0][7][2][1]}.png\", weapon_img20 = f\"{information_lst[1][7][0][1]}.png\",\n weapon_img21 = f\"{information_lst[1][7][1][1]}.png\", weapon_img22 = f\"{information_lst[1][7][2][1]}.png\",\n weapon_img30 = f\"{information_lst[2][7][0][1]}.png\", weapon_img31 = f\"{information_lst[2][7][1][1]}.png\",\n weapon_img32 = f\"{information_lst[2][7][2][1]}.png\", weapon_img40 = f\"{information_lst[3][7][0][1]}.png\",\n weapon_img41 = f\"{information_lst[3][7][1][1]}.png\", weapon_img42 = 
f\"{information_lst[3][7][2][1]}.png\")\n if len(information_lst) == 5:\n return render_template(\"index.html\", your_name = information_lst[0][0], source_img = f\"{information_lst[0][1]}.jpg\",\n kd = information_lst[0][3], win_rate = information_lst[0][2], adr = information_lst[0][4],\n headshot = information_lst[0][8], accur = information_lst[0][7][0][0],\n accur2 = information_lst[0][7][1][0], accur3 = information_lst[0][7][2][0],\n your_name2 = information_lst[1][0], source_img2 = f\"{information_lst[1][1]}.jpg\",\n kd2 = information_lst[1][3], win_rate2 = information_lst[1][2], adr2 = information_lst[1][4],\n headshot2 = information_lst[1][8], accur20 = information_lst[1][7][0][0],\n accur21 = information_lst[1][7][1][0], accur22 = information_lst[1][7][2][0],\n your_name3 = information_lst[2][0], source_img3 = f\"{information_lst[2][1]}.jpg\",\n kd3 = information_lst[2][3], win_rate3 = information_lst[2][2], adr3 = information_lst[2][4],\n headshot3 = information_lst[2][8], accur30 = information_lst[2][7][0][0],\n accur31 = information_lst[2][7][1][0], accur32 = information_lst[2][7][2][0],\n your_name4 = information_lst[3][0], source_img4 = f\"{information_lst[3][1]}.jpg\",\n kd4 = information_lst[3][3], win_rate4 = information_lst[3][2], adr4 = information_lst[3][4],\n headshot4 = information_lst[3][8], accur40 = information_lst[3][7][0][0],\n accur41 = information_lst[2][7][1][0], accur42 = information_lst[3][7][2][0],\n your_name5 = information_lst[4][0], source_img5 = f\"{information_lst[4][1]}.jpg\",\n kd5 = information_lst[4][3], win_rate5 = information_lst[4][2], adr5 = information_lst[4][4],\n headshot5 = information_lst[4][8], accur50 = information_lst[4][7][0][0],\n accur51 = information_lst[4][7][1][0], accur52 = information_lst[4][7][2][0],\n weapon_img10 = f\"{information_lst[0][7][0][1]}.png\", weapon_img11 = f\"{information_lst[0][7][1][1]}.png\",\n weapon_img12 = f\"{information_lst[0][7][2][1]}.png\", weapon_img20 = f\"{information_lst[1][7][0][1]}.png\",\n weapon_img21 = f\"{information_lst[1][7][1][1]}.png\", weapon_img22 = f\"{information_lst[1][7][2][1]}.png\",\n weapon_img30 = f\"{information_lst[2][7][0][1]}.png\", weapon_img31 = f\"{information_lst[2][7][1][1]}.png\",\n weapon_img32 = f\"{information_lst[2][7][2][1]}.png\", weapon_img40 = f\"{information_lst[3][7][0][1]}.png\",\n weapon_img41 = f\"{information_lst[3][7][1][1]}.png\", weapon_img42 = f\"{information_lst[3][7][2][1]}.png\",\n weapon_img50 = f\"{information_lst[4][7][0][1]}.png\", weapon_img51 = f\"{information_lst[4][7][1][1]}.png\",\n weapon_img52 = f\"{information_lst[4][7][2][1]}.png\")", "def capteur_info_relever1():\n return render_template(\n \"relever_capt.html\",\n liste = get_capteurs())", "def make_player(self, page):\r\n player = Player()\r\n face = page.find(\"div\",id=\"info_content\").find_all(\"td\")\r\n player.name = face[0].get_text().strip()\r\n player.club = face[1].get_text().strip()\r\n player.nation = face[2].get_text().strip()\r\n player.league = face[3].get_text().strip()\r\n player.sf = int(face[4].get_text().strip())\r\n player.wf = int(face[5].get_text().strip())\r\n player.ir = int(face[6].get_text().strip())\r\n player.foot = face[7].get_text().strip()\r\n player.height = float(face[8].get_text().split(\"|\")[0].strip(\"cm \"))\r\n player.weight = float(face[9].get_text().strip(\"\"))\r\n player.version = face[10].get_text().strip()\r\n player.def_wr = face[11].get_text().strip()\r\n player.att_wr = face[12].get_text().strip()\r\n player.added_on = 
datetime.strptime(face[13].get_text().strip()[2:], \"%y-%m-%d\")\r\n player.real_face = face[15].get_text().strip()==\"icon-checkmark text-success\"\r\n player.body_type = face[16].get_text().strip()\r\n player.age = face[17].get_text().strip(\" years old \\n\\r\")\r\n player.rating = self.make_rating([sub for sub in page.find(\"div\",id=\"stats_box\").find(class_=\"stats-inner col-md-12\").find(class_=\"row\").children])\r\n player.href = \"/\"+page.find(id=\"share_player_link\")[\"value\"].strip(\"https://www.futbin.com/\")\r\n player.pid = int(page.find(id=\"page-info\")[\"data-player-resource\"])\r\n return player", "def display_make_a_player():\n display_head_menu(\"Création du joueur\")\n player_dict = {}\n player_dict.update({\"lastname\": display(\"Nom: \")})\n player_dict.update({\"firstname\": display(\"Prénom: \")})\n player_dict.update({\"gender\": display(\"Genre : (homme/femme)\", \"gender\")})\n player_dict.update({\"date_of_birth\": display(\"Date de naissance: \", \"date\")})\n player_dict.update({\"ranking\": display(\"Classement : \", \"natural\")})\n return player_dict", "def getDescription(self):\n return \"GGP Players (*.player)\"", "def packgame1(self, event):\n\n username = wx.TextEntryDialog(self, \"\", \"Enter your Mod name here\", \"\", wx.OK | wx.CANCEL)\n\n modal = username.ShowModal()\n userName = username.GetValue()\n username.Destroy()\n\n if modal == wx.ID_OK:\n password = wx.PasswordEntryDialog(self, \"\", \"Enter your password here\", \"\", wx.OK | wx.CANCEL)\n\n modal = password.ShowModal()\n passWord = password.GetValue()\n password.Destroy()\n\n if modal == wx.ID_OK:\n stream = wx.TextEntryDialog(self, \"\", \"Enter your game1 stream name here\", \"\", wx.OK | wx.CANCEL)\n\n modal = stream.ShowModal()\n streamName = stream.GetValue()\n stream.Destroy()\n\n if modal == wx.ID_OK:\n whatStream = f\"https://packmaster.office.company.com/run.ws?label=p4&task=rs2_incremental&rtArgs0={streamName}&slave=Any\"\n\n password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n password_mgr.add_password(None, whatStream, userName, passWord)\n\n handler = urllib.request.HTTPBasicAuthHandler(password_mgr)\n opener = urllib.request.build_opener(handler)\n urllib.request.install_opener(opener)\n\n try:\n packmaster_id = urllib.request.urlopen(whatStream).read()\n packmaster_id = packmaster_id.decode(\"utf-8\")\n webbrowser.open(\"https://packmaster.office.company.com/\")\n\n wrongitem = f'\"id\": {packmaster_id},\\n \"label\": \"p4\",\\n \"name\": \"rs2_incremental\",\\n \"status\": \"Failed\"'.encode()\n\n packmaster_url = f\"https://packmaster.office.company.com/jobs_js.ws?focus={packmaster_id}&displayType=1\"\n packscrape_top_level_url = packmaster_url\n password_mgr.add_password(None, packscrape_top_level_url, userName, passWord)\n\n packmaster_html = urllib.request.urlopen(packmaster_url).read()\n\n if wrongitem in packmaster_html:\n self.SetStatusText(f\"Packing has failed.\")\n else:\n self.SetStatusText(f\"Login Successful, now packing {streamName}\")\n\n except urllib.request.HTTPError as e:\n self.SetStatusText(f\"{e}\")\n\n # def threadmaster(self, event):\n worker = Thread(target=self.packgame2())\n worker.setDaemon(True)\n worker.start()", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/wetek.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# WetekPlay\n self.wp_button = pyxbmct.RadioButton('')\n self.placeControl(self.wp_button, 10, 3, rowspan=2, columnspan=4)\n 
self.connect(self.wp_button, self.wp_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetekplay', 2) == 1:\n self.wp_button.setSelected(True)\n else:\n self.wp_button.setSelected(False)\n wp = pyxbmct.Image(addonfolder+artsfolder+'/wp.png')\n self.placeControl(wp, 10, 3, rowspan=2, columnspan=4)\n\n\t\t# WetekPlay2\n self.wp2_button = pyxbmct.RadioButton('')\n self.placeControl(self.wp2_button, 10, 9, rowspan=2, columnspan=4)\n self.connect(self.wp2_button, self.wp2_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetekplay2', 2) == 1:\n self.wp2_button.setSelected(True)\n else:\n self.wp2_button.setSelected(False)\n wp2 = pyxbmct.Image(addonfolder+artsfolder+'/wp2.png')\n self.placeControl(wp2, 10, 9, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def newPlayer():\r\n pass", "def test_gethardwares_item(self):\n pass", "def LookOn(play, item):\r\n\tspk(\"You start perusing the items on %s\" % item.name)\r\n\tif item.items != []:\r\n\t\tlookoner(play, item)\r\n\telse:\r\n\t\tspk(\"Nothing\")", "def save_updated_player(variable1, variable2, variable3, frame):\r\n name = frame.children[\"!entry\"].get()\r\n nickname = frame.children[\"!entry2\"].get()\r\n elo = variable1.get()\r\n first_role = variable2.get()\r\n second_role = variable3.get()\r\n\r\n updated_player = player_class.Player(name, nickname, elo, first_role, second_role)\r\n\r\n for k in range(len(registeredPlayers)):\r\n if registeredPlayers[k].regNumber == updated_player.regNumber:\r\n registeredPlayers[k] = updated_player\r\n clear_frames()\r\n frame = Frame(window)\r\n frame.pack()\r\n createdFrames.append(frame)\r\n lb = Label(frame, text=\"Update successful\", bg=\"black\", fg=\"white\")\r\n lb.pack()", "def play(self, player, game):\n super().play(player, game)\n game.set_action(\"PICKUP_CODER\")", "def fixRotterdamItem(item):\n pywikibot.output(u'Working on %s' % (item.title(),))\n data = item.get()\n claims = data.get('claims')\n if not u'P217' in claims:\n pywikibot.output(u'No inventory number found, skipping')\n return\n if not len(claims.get(u'P217'))==1:\n pywikibot.output(u'Multiple inventory numbers found, skipping')\n return\n invclaim = claims.get(u'P217')[0]\n inventorynumber = invclaim.getTarget()\n if not u'_' in inventorynumber:\n pywikibot.output(u'No _ found in inventory number, skipping')\n return\n newinventorynumber = inventorynumber.replace(u'_', u'-')\n\n if not u'P973' in claims:\n pywikibot.output(u'No url found, skipping')\n return\n if not len(claims.get(u'P973'))==1:\n pywikibot.output(u'Multiple urls found, skipping')\n return\n urlclaim = claims.get(u'P973')[0]\n url = urlclaim.getTarget()\n newurl = url\n\n if not u'collectie.museumrotterdam.nl/objecten/' in url:\n pywikibot.output(u'Invalid url: %s, skipping' % (url,))\n return\n if not url.endswith(newinventorynumber):\n pywikibot.output(u'Url %s and inventory number %s don\\'t match, skipping' % (url,newinventorynumber))\n return\n\n museumpage = requests.get(url)\n if u'Pagina niet gevonden' in museumpage.text:\n newurl = newurl + u'-B'\n pywikibot.output(u'Current url %s broken, trying %s' % (url,newurl ))\n newinventorynumber = newinventorynumber + u'-B'\n museumpage = requests.get(newurl)\n if not u'content=\"Museum Rotterdam - van de stad\">' in museumpage.text:\n pywikibot.output(u'New url did not work, skipping')\n return\n\n summary 
= u'Fixing Rotterdam Museum'\n if inventorynumber!=newinventorynumber:\n invclaim.changeTarget(newinventorynumber, summary=summary)\n if url !=newurl:\n urlclaim.changeTarget(newurl, summary=summary)", "def add_player(inp_to_add, type_to_add, host, root, password):\r\n detail_dict = {}\r\n\r\n if type_to_add == \"url\":\r\n player_soup = BeautifulSoup(requests.get(inp_to_add).text, 'html.parser')\r\n player_site = inp_to_add\r\n else:\r\n player_soup, player_site = get_first_search_result(\r\n SOCCER_URL + \"/search/players/?q=\" + inp_to_add, player=1)\r\n\r\n if player_soup:\r\n\r\n passport = player_soup.find('div', class_=\"block_player_passport real-content clearfix\")\r\n\r\n if passport:\r\n details = passport.find_all('dt')\r\n results = passport.find_all('dd')\r\n\r\n detail_dict = {}\r\n for i in range(len(details)):\r\n detail_dict[details[i].text] = results[i].text\r\n\r\n league_url = SOCCER_URL + player_soup.find('table', class_=\"playerstats career sortable table\")\\\r\n .tbody.tr.find('td', class_=\"competition\").a[\"href\"]\r\n find_league({league_url}, \"url\", host, root, password)\r\n\r\n return detail_dict[\"First name\"], detail_dict[\"Last name\"] # Return first and last name as in DB\r", "def revisar_input(self):\n pass", "def save_capteur():\n f = None\n f = CapteurForm()\n a = get_capteur(f.get_id())\n na = a.get_name()\n if f.validate_on_submit():\n a.set_name(f.get_name())\n a.set_num(f.get_phoneNumber())\n a.set_interval(f.get_interval())\n if a.get_parterre() != f.get_parterre().get_id():\n a.set_parterre(f.get_parterre().get_id())\n if a.get_typeMesure() != f.get_typeMesure().get_id():\n a.set_typeMesure(f.get_typeMesure().get_id())\n ac = Actions(\n contenu = \"Modification du capteur \"+na+\" -> \"+a.get_name(),\n liste = 1\n )\n db.session.add(ac)\n db.session.commit()\n return redirect(url_for(\n \"capteur_info\",\n id = f.get_id()))\n f.next.data = \"save_capteur\"\n return render_template(\n \"addCapteur.html\",\n title = a.get_name()+\" - edit\",\n form = f,\n capteur=a,\n param = \"modif\")", "def save_plante():\n f = None\n f = PlanteForm()\n f.parterre.data = get_parterre(get_plante(f.get_id()).get_parterre())\n a = get_plante(f.get_id())\n if f.validate_on_submit():\n a.set_name(f.get_name())\n a.set_comportement(f.get_comportement())\n a.set_taux_humidite(f.get_taux_humidite())\n a.set_quantite(f.get_quantite())\n p = Actions(\n contenu = \"Modification de la plante \"+a.get_name() + \" du parterre \"+ get_parterre(a.get_parterre()).get_name(),\n liste = 1\n )\n db.session.add(p)\n db.session.commit()\n return redirect(url_for(\n \"plante_info\", id = f.get_id()))\n f.next.data = \"save_plante\"\n return render_template(\n \"create-plante.html\",\n title = a.get_name()+\" - edit\",\n form = f,\n plante = a,\n param = \"modif\")", "def __init__(self, ventana_id):\n\n GObject.Object.__init__(self)\n\n self.name = \"MplayerReproductor\"\n self.ventana_id = ventana_id\n self.mplayer = False\n self.salida = False\n self.entrada = False\n self.estado = False\n self.duracion = 0\n self.posicion = 0\n self.volumen = 0\n self.actualizador = False\n self.uri = False\n self.video_in_stream = False\n\n self.config = {\n 'saturacion': 0,\n 'contraste': 0,\n 'brillo': 0,\n 'hue': 0,\n 'gamma': 0\n }\n\n self.efectos = []\n self.config_efectos = {}", "def test_add_media_type(self):\n\n # check if documentalist has access to create new media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new' )\n\n # 403 = unauthorized\n 
self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = {\n 'status': '0',\n 'acronym': 'foto',\n 'name': 'Foto',\n 'language' : 'pt-br',\n 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/multimedia/media-type/new', form_data, follow=True )\n\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, \"Foto\")", "def onOpen(self):", "def __init__(self, pname, pmax, plist):\n\n #the player has to have... \n self.name = pname\n self.max_items = pmax\n self.items = plist", "def menu_saving(self, app: object, entry: str) -> None:\n while True:\n prod = self.cmd_products.get(entry)\n alt = app.search_alt(prod)\n sub = app.relevance(alt)\n print(\"-\" * 50)\n print(f\"\\nSubstitut trouvé pour le produit {prod} : {sub}\")\n entry = input(\n \"\\nVoulez vous enregistrer le substitut dans votre liste ? (y/n)\"\n )\n if entry == \"y\":\n feedback = app.insert_sub(prod, sub)\n print(feedback)\n self.back = True\n break\n elif entry == \"n\":\n self.back = True\n break\n else:\n print(\"\\nCommande incorrecte\")", "def on_load(self):", "def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")", "def Register(self,schedule):\n # oscar login page\n oscar = \"https://oscar.gatech.edu/pls/bprod/twbkwbis.P_GenMenu?name=bmenu.P_StuMainMnu\"\n \n #mechanize boilerplate\n br = mechanize.Browser()\n cj = cookielib.LWPCookieJar()\n br.set_cookiejar(cj)\n br.set_handle_equiv(True)\n br.set_handle_gzip(True)\n br.set_handle_redirect(True)\n br.set_handle_referer(True)\n br.set_handle_robots(False)\n br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n br.addheaders = [(\"User-agent\", \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1\")]\n\n #open oscar sign-in page and grab login form\n r = br.open(oscar)\n br.form = list(br.forms())[0]\n br[\"sid\"] = self._id\n br[\"PIN\"] = self.pin\n res = br.submit()\n\n #initial landing page once signed into oscar\n br.open(\"https://oscar.gatech.edu/pls/bprod/twbkwbis.P_GenMenu?name=bmenu.P_RegMnu\")\n\n #jump to registration sub menu\n br.open(\"https://oscar.gatech.edu/pls/bprod/bwskfreg.P_AltPin\")\n\n #the year selection form is the second(hence 1st index)\n #defaults to the current year so we can just submit\n br.form = list(br.forms())[1]\n br.submit()\n\n #now we are at the registration page\n #the text fields are in the second form\n br.form = list(br.forms())[1]\n fields = []\n\n #the text fields all have the same name and type\n #so we'll just insert them into a list \n for control in br.form.controls:\n if control.type == \"text\" and control.name == \"CRN_IN\":\n fields.append(control)\n\n #set each text fields equal to a class in the schedule\n for field, course in zip(fields, schedule):\n field.value = str(course)\n \n response = br.submit()\n registered_classes = self.EnrolledClasses(response)\n return registered_classes", "def item(self, item_name):\n\tself.log.info('Not 
implemented yet... Sorry!')\n\tpass", "def make(item:dict):\n main_image = item[\"item_json\"][\"Item\"][\"mediumImageUrls\"][0][\"imageUrl\"]\n new_main_image = rak_image_mainpulation.sz10(main_image_url=main_image)\n embedVar = discord.Embed(title=item[\"item_name\"], description=item[\"price\"], color=0x00ff00)\n embedVar.set_image(url=new_main_image)\n embedVar.add_field(name=\"Link\", value=item[\"item_url\"], inline=False)\n return embedVar" ]
[ "0.5468898", "0.5372713", "0.5341861", "0.5332574", "0.5239701", "0.5222167", "0.51035255", "0.50887513", "0.5044618", "0.49934238", "0.49651843", "0.49612513", "0.4917673", "0.48918155", "0.48897576", "0.48815265", "0.48449853", "0.48328492", "0.48286667", "0.48030823", "0.47977054", "0.47900555", "0.47888008", "0.47857985", "0.4781796", "0.4755281", "0.47507796", "0.4747506", "0.47429636", "0.4737392" ]
0.65343946
0
when the song id changes, the title must be updated as well (by looking up the song)
def song(self, value):
    self._song_id = value
    data = Song(value)
    self.songtitel = data.songtitel if data.found else ""
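A minimal usage sketch follows; the `Track` class name and the property wiring are assumptions, not from the source — only the setter body above is original, and the `Song` lookup class is taken to exist as in the source.

class Track:
    def __init__(self):
        self._song_id = None
        self.songtitel = ""

    @property
    def song(self):
        return self._song_id

    @song.setter
    def song(self, value):
        self._song_id = value
        data = Song(value)  # look the song up by its id
        self.songtitel = data.songtitel if data.found else ""

# assigning a new id refreshes the title as a side effect
track = Track()
track.song = 42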
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_title():", "def media_title(self):\n return self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.Title", "def construct_metadata(song):\n print(song) #temp", "def tv_tropes_id(title):\n pass", "def get_title_by_id(id):\n\n # your code", "def get_title_song(mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"SELECT title_song FROM song_data \"\n \"WHERE user_id = {0};\".format(mess_chat_id)\n )\n\n # obtain the first element of the set\n title_of_song = cursor.fetchone()[0]\n\n return title_of_song", "def _update_title(self, title, tag, lid):\n return title", "def media_title(self):\n if self._track_id is not None and self._playlist:\n for track in self._playlist:\n if track.get(\"id\") == self._track_id:\n return track.get(\"title\")\n return None", "def imdb_id(title):\n pass", "def set_now_playing_title(self, title):\n self.now_playing.text = title", "def getTitle(self):\n\n # print(self.soupObject.title.string)\n try:\n s = self.soupObject.find(\"meta\", attrs={\"name\": \"twitter:title\"})\n self.title = str(s['content'])\n self.title = self.title.replace(\"/\", \"\")\n self.title = self.title.strip()\n if not self.title:\n s = int(\"deliberateError\")\n\n # except\n except:\n self.title = \"Amazonsubtitles\"\n\n pass", "def update_title_song(title_song, mess_chat_id):\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"UPDATE song_data SET title_song = %s \"\n \"WHERE user_id = %s;\", (title_song, mess_chat_id)\n )\n\n connection.commit()", "def parse_song_data(data):\r\n song_title_regex = re.compile(r'<title>([\\S\\s]+)</title>')\r\n\r\n match = song_title_regex.search(data)\r\n\r\n song_title = match.groups(0)[0]\r\n\r\n # Replaces the HTML code for apostrophe with the symbol\r\n return re.sub(r'&#39;', \"\\'\", song_title)", "def songInfo():\n \n global songFile, currentRadio\n \n lines = songFile.readlines()\n if len(lines) > 0:\n\n songFile.seek(0)\n title = formatSong(lines[0]).strip()\n \n with canvas(device) as draw:\n invert(draw, 0, 0, names[currentRadio][0], True)\n if len(title)<19:\n draw.text((72-4*(len(title)), 20), title , fill=\"white\")\n else:\n lineNum = len(title)\n if lineNum > 72:\n lineNum = 72\n thelist = [title[i:i+19] for i in range(0, lineNum, 19)]\n for i in range(len(thelist)): \n draw.text((81-4*(len(thelist[i].strip())), 19+10*i), thelist[i] , fill=\"white\")", "def setTitlu(self, titlu):\n self.titlu = titlu", "def Show_Titles( self ):\r\n self.system.Change_Seq( \"Title\" )", "def bottle_song_for(num):\n pass", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def set_title(audio: EasyID3, title: str):\r\n audio['title'] = title\r\n audio.save()", "def dummy_movie_snippet(doc_id):\n with open('2018_movies.json') as f:\n doc = json.load(f)\n doc_file = doc[doc_id]\n s = doc_file['Text'][:100] + \"......\"\n return (doc_id, doc_file['Title'], s)", "def song(song_id):\n return process_input(song_id) #jsonify(recomendations)", "def search_title(self):\n\t\tnew_name = self.removez_all(self.init_str)\n\t\tresult = self.search_ext(new_name)\n\t\tresult = self.search_encoder(result)\n\t\tresult = self.search_quality(result)\n\t\tresult = self.search_codec(result)\n\t\tresult = self.search_lang(result)\n\t\tresult = self.search_version(result)\n\t\tresult = self.search_source(result)\n\t\tresult = self.search_audio(result)\n\t\tresult = self.search_year(result)\n\t\tresult = result.replace('...', '.')\n\t\tresult = 
result.replace('..', '.')\n\t\tself.title = self.remove_lasts_dots(result)", "def WaveletSetTitle(self, wave_id, wavelet_id, title):\n raise NotImplementedError()", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def _http_get_title_by_id(self, id) -> dict:\n if int(id) == -1:\n # there is no title\n return None\n playl = self._http_playlist()\n return [title for title in playl if int(title['id']) == int(id)][0]", "def get_title_artist(title_element): \n \n \n title_token = title_element.text.split(\" \")\n\n word = title_token.pop(0)\n artist = ''\n title = ''\n first = True\n while(title_token != [] and word != '-' and word[-1] != '-'):\n if first:\n first = False\n artist += (word)\n else:\n artist += ' '\n artist += word\n\n word = title_token.pop(0)\n \n if word[-1] == '-':\n word = word[:-1]\n artist += word\n \n if title_token == []:\n print(\"ERROR HERE: \", title_element.text)\n return None, None\n \n word = title_token.pop(0)\n first = True\n\n while(True):\n if first:\n first = False\n title += word\n else:\n title += ' '\n title += word\n if title_token != []:\n word = title_token.pop(0)\n if word == \"ALBUM\" or (word == \"EP\" and title_token[0] == \"REVIEW\"):\n break\n else:\n break\n return title, artist", "def ft_in_title(self, item, drop_feat):\n artist = item.artist.strip()\n albumartist = item.albumartist.strip()\n\n # Check whether there is a featured artist on this track and the\n # artist field does not exactly match the album artist field. In\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n self._log.info('{}', displayable_path(item.path))\n\n feat_part = None\n\n # Attempt to find the featured artist.\n feat_part = find_feat_part(artist, albumartist)\n\n # If we have a featuring artist, move it to the title.\n if feat_part:\n self.update_metadata(item, feat_part, drop_feat)\n else:\n self._log.info('no featuring artists found')", "def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words", "def scrape_song(url):\n soup = scrapekit.handle_url(url)\n\n contents = scrape_id_to_div(soup, \"Lyrics\")\n if not contents:\n return None\n\n filetext = ''.join(c.text for c in contents)\n\n # Check if there is a reprise\n REPRISE = 'Reprise'\n\n reprise = soup.find(id=REPRISE)\n if reprise:\n filetext += '\\n\\n'\n filetext += REPRISE + ':\\n\\n'\n\n contents = scrape_id_to_div(soup, REPRISE)\n filetext += ''.join(c.text for c in contents)\n\n # Get song title, fix blank spaces for file name\n songtitle = soup.title.text.split('|')[0]\n\n song_text = ''\n song_text += 'Song: {}\\n'.format(songtitle)\n song_text += get_infobox_info(soup)\n song_text += '\\n\\n'\n song_text += filetext\n\n return song_text" ]
[ "0.666082", "0.64697766", "0.6342941", "0.63207954", "0.62568146", "0.6199513", "0.618698", "0.6174337", "0.61728776", "0.61343974", "0.6076811", "0.60717183", "0.6051481", "0.6007897", "0.5971578", "0.59425765", "0.5912056", "0.58918613", "0.58918613", "0.58787864", "0.58745617", "0.58720106", "0.5843827", "0.5840972", "0.5838923", "0.58165115", "0.5815271", "0.5801383", "0.5796682", "0.577979" ]
0.70082396
0
Import the `datafile` Excel sheet to a CSV representation stored within the database that can be further processed without large filesystem operations. This also stores the original file's hash in order to skip importing unchanged data. Returns a boolean (whether the file was imported).
def import_datafile(db, infile):
    res = stat(infile)
    mtime = datetime.utcfromtimestamp(res.st_mtime)
    hash = md5hash(infile)
    data_file = db.model.data_file
    # Should maybe make sure error is not set
    rec = db.get(data_file, hash)
    # We are done if we've already imported
    if rec is not None:
        return False
    # Values to insert
    cols = dict(
        file_hash=hash,
        file_mtime=mtime,
        basename=infile.stem,
        csv_data=None)
    try:
        cols['csv_data'] = extract_datatable(infile)
    except NotImplementedError as e:
        secho(str(e), fg='red', dim=True)
    tbl = data_file.__table__
    sql = (insert(tbl)
           .values(file_path=str(infile), **cols)
           .on_conflict_do_update(
               index_elements=[tbl.c.file_path],
               set_=dict(**cols)))
    db.session.execute(sql)
    return True
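A hedged caller sketch; the `Database` wrapper, the commit step, and the file path are assumptions introduced for illustration — only `import_datafile` itself comes from the source, and the function executes the upsert without committing, so a caller would plausibly commit afterwards.

from pathlib import Path

db = Database()                       # hypothetical wrapper exposing .model, .get, .session
infile = Path("data/sheet-01.xlsx")   # assumed input Excel file
if import_datafile(db, infile):
    db.session.commit()               # persist the inserted/updated row
else:
    print(f"{infile.name}: unchanged, import skipped")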
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_data_from_file(self, filename):\n self.get_cursor()\n ct = len([True for c in self.table.columns if c[1][0][:3] == \"ct-\"]) != 0\n if (([self.table.cleanup.function, self.table.delimiter,\n self.table.header_rows] == [no_cleanup, \",\", 1])\n and not self.table.fixed_width\n and not ct\n and (not hasattr(self.table, \"do_not_bulk_insert\") or not self.table.do_not_bulk_insert)):\n columns = self.table.get_insert_columns()\n filename = os.path.abspath(filename)\n statement = \"\"\"\nCOPY \"\"\" + self.table_name() + \" (\" + columns + \"\"\")\nFROM '\"\"\" + filename.replace(\"\\\\\", \"\\\\\\\\\") + \"\"\"'\nWITH DELIMITER ','\nCSV HEADER;\"\"\"\n try:\n self.execute(\"BEGIN\")\n self.execute(statement)\n self.execute(\"COMMIT\")\n except:\n self.connection.rollback()\n return Engine.insert_data_from_file(self, filename)\n else:\n return Engine.insert_data_from_file(self, filename)", "def upload_csv_data(self, upload_file):\n db = DataBase(self.DATABASE_DATA)\n db.insert_data_from_file(\n 'triagedata.historicdata',\n ('clinic_id', 'severity', 'date_received', 'date_seen'),\n upload_file,\n ','\n )", "def insert_data_from_file(self, filename):\n self.get_cursor()\n if self.check_bulk_insert() and self.table.header_rows < 2 and (\n self.table.delimiter in [\"\\t\", \",\"]):\n print(\"Inserting data from \" + os.path.basename(filename) + \"...\")\n\n if self.table.delimiter == \"\\t\":\n fmt = \"TabDelimited\"\n elif self.table.delimiter == \",\":\n fmt = \"CSVDelimited\"\n\n if self.table.header_rows == 1:\n hdr = \"Yes\"\n else:\n hdr = \"No\"\n\n columns = self.table.get_insert_columns()\n\n need_to_delete = False\n add_to_record_id = 0\n\n if self.table.pk and not self.table.contains_pk:\n if '.' in os.path.basename(filename):\n proper_name = filename.split('.')\n len_name = len(proper_name)\n newfilename = '.'.join(\n proper_name[0:-1] if len_name > 0 else proper_name[0]\n ) + \"_new.\" + filename.split(\".\")[-1]\n else:\n newfilename = filename + \"_new\"\n\n if not os.path.isfile(newfilename):\n print(\"Adding index to \" + os.path.abspath(newfilename) + \"...\")\n read = open(filename, \"rb\")\n write = open(newfilename, \"wb\")\n to_write = \"\"\n\n for line in read:\n line = line.strip()\n to_write += str(id) + self.table.delimiter + line\n add_to_record_id += 1\n self.table.record_id += add_to_record_id\n\n write.write(to_write + os.linesep)\n write.close()\n read.close()\n need_to_delete = True\n columns = \"record_id, \" + columns\n else:\n newfilename = filename\n\n newfilename = os.path.abspath(newfilename)\n filename_length = (len(os.path.basename(newfilename)) * -1) - 1\n filepath = newfilename[:filename_length]\n statement = \"\"\"\nINSERT INTO \"\"\" + self.table_name() + \" (\" + columns + \"\"\")\nSELECT * FROM [\"\"\" + os.path.basename(newfilename) + ''']\nIN \"''' + filepath + '''\" \"Text;FMT=''' + fmt + ''';HDR=''' + hdr + ''';\"'''\n try:\n self.execute(statement)\n return True\n except BaseException:\n print(\"Couldn't bulk insert. 
Trying manual insert.\")\n self.connection.rollback()\n self.table.record_id -= add_to_record_id\n return None\n finally:\n if need_to_delete:\n os.remove(newfilename)\n\n return Engine.insert_data_from_file(self, filename)", "def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()", "def _load_excel(self, file_path):\n if not os.path.exists(file_path):\n self._logger.error(\"File not exists {0}\".format(file_path))\n return False\n\n # Load in the workbook file\n self._workbook = openpyxl.load_workbook(file_path)\n\n if not self._workbook:\n self._logger(\"Failed to load excel file {0}\".format(file_path))\n return False\n\n self.sheet_names = self._workbook.sheetnames\n return True", "def save_dataframe_to_csv(data: pd.DataFrame, filename: str) -> bool:\n try:\n data = clean_data(data)\n data.to_csv(filename)\n return True\n except(FileNotFoundError):\n print('Error: Could not save the dataframe to csv.')\n return False", "def import_rows(self, csv_file, table_id=None):\n if table_id:\n self.table_id = table_id\n\n params = {'startLine': 1, # skip cols?\n 'encoding': \"UTF-8\",\n 'delimiter': \",\",\n 'isStrict': True}\n\n media = MediaFileUpload(csv_file, mimetype='text/csv', resumable=True)\n self.request = self._table().importRows(tableId=self.table_id, media_body=media, **params)\n self._process_request(name='import_rows', resumable=True)\n \n # URL for new look \n logger.info(\"The fusion table is located at: {}\".format(\n self.build_uri('/view')))\n return True", "def _check_import(isamAppliance, id, filepath):\n tmpdir = get_random_temp_dir()\n tmp_original_file = os.path.join(tmpdir, os.path.basename(\"tempfile.txt\"))\n\n export_file(isamAppliance, instance_id=id, filepath=tmp_original_file, check_mode=False, force=True)\n\n if files_same(tmp_original_file, filepath):\n logger.debug(\"files are the same, so we don't want to do anything\")\n shutil.rmtree(tmpdir)\n return False\n else:\n logger.debug(\"files are different, so we return True to indicate the new file should be imported\")\n shutil.rmtree(tmpdir)\n return True", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if 
str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def _in_place(self, path, dialect, encoding):\n tmpfd, tmpfname = tempfile.mkstemp(prefix=\"clevercsv_\", suffix=\".csv\")\n tmpid = os.fdopen(tmpfd, \"w\", newline=\"\", encoding=encoding)\n self._write_to_stream(path, tmpid, dialect, encoding)\n tmpid.close()\n\n previous_sha1 = sha1sum(path)\n new_sha1 = sha1sum(tmpfname)\n if previous_sha1 == new_sha1:\n os.unlink(tmpfname)\n return 0\n\n shutil.move(tmpfname, path)\n return 2", "def test_from_file_xls(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xls')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def read_xls_csv(self):\n filename = str(self.filename)\n location_stock_id = self.location\n vals = []\n inventory_create = self.env['stock.inventory']\n\n if (filename.endswith('xls') or filename.endswith('xlsx')):\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n pid = row[firstrow.index('id')]\n quantity = row[firstrow.index('quantity')]\n product_obj = self.env['product.product'].search(\n [('id', '=', pid)])\n vals.append({\n 'product_code': product_obj.default_code,\n 'product_qty': quantity,\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n\n else:\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n vals.append({\n 'product_code': row[rows[0].index('id')],\n 'product_qty': row[rows[0].index('quantity')],\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': 
location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n return {\n 'name': 'Stock import',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'res_id': self.id,\n 'view_mode': 'tree,form',\n 'res_model': 'stock.inventory',\n 'target': 'current',\n }", "def validate_file(self):\n filename = str(self.filename)\n self.is_error = False\n self.message = \"\"\n if not (filename.endswith('xls') or filename.endswith('xlsx') or filename.endswith('csv')):\n self.message += \"Please Import only '.xls' or '.xlsx' or '.csv' File.\"\n elif (filename.endswith('xls') or filename.endswith('xlsx')):\n column_list = ['id', 'quantity']\n\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n row = sheet.row_values(0)\n invalid_cols = []\n import pdb;pdb.set_trace()\n for key in row:\n key = key.encode('ascii', 'ignore')\n if key.lower() not in column_list:\n invalid_cols.append(key)\n if invalid_cols:\n self.message = \"Invalid Column Name %s\", ', '.join(\n invalid_cols)\n if not self.message:\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[firstrow.index('id')])])\n if not row[firstrow.index('quantity')]:\n self.message += \"Enter Quantity In Your Excel File\"\n if not product_obj:\n self.message += \"Enter Valid Product Id In Your Excel File\"\n else:\n column_list = ['id', 'quantity']\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n firstrow = [str(item).lower() for item in rows[0]]\n match = [column for column in firstrow if column not in column_list]\n if match:\n self.message += \"Enter Valid Column Name\"\n if not self.message:\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n if not row[rows[0].index('quantity')]:\n self.message += \"Enter Quantity In Your Excel File\"\n if not product_obj:\n self.message += \"Enter Valid Product Id In Your Excel File\"\n\n if self.message:\n self.is_error = True\n if not self.is_error:\n self.is_validate = True\n return {\n 'res_id': self.id,\n 'view_id': self.env.ref('import_stock_inventory_drc.import_stock_inventory_view_wizard_form').ids,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'stock.inventory.wizard',\n 'type': 'ir.actions.act_window',\n 'target': 'new'\n }", "def import_csv(\n self,\n source: DataImportSource,\n filepath: Path,\n **kwargs: Any,\n ) -> tuple[bool, str]:\n importer_type = source.get_importer_type()\n importer = importer_type(db=self.db)\n success, msg = importer.import_csv(filepath=filepath, **kwargs)\n return success, msg", "def add_csv(filename):\n with open(f'{filename}') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n new_entry = False\n name = row['product_name']\n price = clean_price(row['product_price'])\n quantity = int(row['product_quantity'])\n date = clean_date(row['date_updated'])\n query = 
session.query(Product).filter_by(product_name=row['product_name'])\n\n if query.count() == 0:\n new_entry = True\n else:\n for item in query:\n if date > item.date_updated:\n item.product_price = price\n item.product_quantity = quantity\n item.date_updated = date\n session.add(item)\n\n if new_entry:\n product = Product(product_name=name, product_price=price,\n product_quantity=quantity, date_updated=date)\n session.add(product)\n session.commit()", "def load_data(self):\n df= self.read_file()\n for row,col in df.iterrows():\n Employeeid = int(col['Empolyeeid'])\n Employee_Name = col['Employee_Name']\n Age = col['Age']\n Salary = col['Salary']\n self.table.put_item(\n Item={\n \"Employeeid\":Employeeid,\n \"Employee_Name\": Employee_Name,\n \"Age\": Age,\n \"Salary\": Salary\n }\n )\n return True", "def _verify_csv_file_report(self, report_store, expected_data):\n report_csv_filename = report_store.links_for(self.course.id)[0][0]\n report_path = report_store.path_to(self.course.id, report_csv_filename)\n with report_store.storage.open(report_path) as csv_file:\n csv_file_data = csv_file.read()\n # Removing unicode signature (BOM) from the beginning\n csv_file_data = csv_file_data.decode(\"utf-8-sig\")\n for data in expected_data:\n self.assertIn(data, csv_file_data)", "def process_csv_data(file_for_processing: FileForProcessing):\n \n if file_for_processing.file_to_process.os_type == ANDROID_API:\n # Do fixes for Android\n if file_for_processing.data_type == ANDROID_LOG_FILE:\n file_for_processing.file_contents = fix_app_log_file(\n file_for_processing.file_contents, file_for_processing.file_to_process.s3_file_path\n )\n \n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n if file_for_processing.data_type != ACCELEROMETER:\n # If the data is not accelerometer data, convert the generator to a list.\n # For accelerometer data, the data is massive and so we don't want it all\n # in memory at once.\n csv_rows_list = list(csv_rows_list)\n \n if file_for_processing.data_type == CALL_LOG:\n header = fix_call_log_csv(header, csv_rows_list)\n if file_for_processing.data_type == WIFI:\n header = fix_wifi_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n else:\n # Do fixes for iOS\n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n \n if file_for_processing.data_type != ACCELEROMETER:\n csv_rows_list = list(csv_rows_list)\n \n # Memory saving measure: this data is now stored in its entirety in csv_rows_list\n file_for_processing.clear_file_content()\n \n # Do these fixes for data whether from Android or iOS\n if file_for_processing.data_type == IDENTIFIERS:\n header = fix_identifier_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n if file_for_processing.data_type == SURVEY_TIMINGS:\n header = fix_survey_timings(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n \n header = b\",\".join([column_name.strip() for column_name in header.split(b\",\")])\n if csv_rows_list:\n return (\n # return item 1: the data as a defaultdict\n binify_csv_rows(\n csv_rows_list,\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n ),\n # return item 2: the tuple that we use as a key for the defaultdict\n (\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n )\n )\n else:\n return 
None, None", "def importXlsxIntoDb(input):\n #import global variable\n global UPLOAD_ID\n global PATIENT_NUM\n global DATABASE\n\n connection = db.create_connection(DATABASE)\n\n xlsx = pd.read_excel(input)\n\n #looping on each row\n print(\" - Importing data in DB\", end = '')\n for index, row in xlsx.iterrows():\n if (pd.isna(row['DATE_MORT']) == False):\n DEATH_DATE = row['DATE_MORT']\n DEATH_CODE = 1\n else :\n DEATH_DATE = None #insert null in db\n DEATH_CODE = 0\n if (pd.isna(row['NOM_JEUNE_FILLE']) == False):\n MAIDEN_NAME = row['NOM_JEUNE_FILLE']\n else:\n MAIDEN_NAME = None\n db.insert_patient(connection, (PATIENT_NUM, row['NOM'], row['PRENOM'], row['DATE_NAISSANCE'], row['SEXE'], MAIDEN_NAME, row['ADRESSE'], row['TEL'], row['CP'], row['VILLE'], DEATH_DATE, row['PAYS'], DEATH_CODE, UPLOAD_ID))\n db.insert_patient_ipphist(connection, (PATIENT_NUM, row['HOSPITAL_PATIENT_ID'], \"export_patient.xlsx\", 0, UPLOAD_ID))\n PATIENT_NUM = PATIENT_NUM + 1\n UPLOAD_ID = UPLOAD_ID + 1\n if (index % 100 == 0):\n print(\".\", end = '')\n #commit the changes to db\t\t\t\n connection.commit()\n #close the connection\n connection.close()\n print(\"\\n\")", "def hash_control(self):\n import hashlib\n from datetime import datetime\n import shutil\n\n # Generate hash with file content\n h = hashlib.md5()\n f = open(self.response, 'r')\n h.update(f.read())\n\n # Copy file to repository\n session = model.Session\n #metadata = model.metadata\n\n # Create table if it doesn't exists\n setup_model()\n\n # First check if hash is already in database\n results = session.query(DataRepository.hash).filter_by(hash=h.hexdigest()).all()\n #self.log(results)\n\n if len(results) > 0:\n #log.error('This file %s has the same hash of a file already in\\\n # database. Aborting' % self.response)\n self.log( 'This file %s has the same hash of a file already in\\\n database. 
Aborting' % self.response)\n os.remove(self.response)\n return True\n\n # Today's date\n file_date = datetime.today()\n\n # Filename hash to store\n filename, extension = os.path.splitext(os.path.basename(self.response))\n h2 = hashlib.md5()\n h2.update(file_date.__str__() + filename)\n filename = h2.hexdigest() + extension\n\n # Now add full repository path to filename\n filename2 = os.path.join(self.repository,filename)\n\n # Now insert data and copy file to repository\n #log.warning('Inserting file %s in repository' % self.response)\n self.log('Inserting file %s in repository' % self.response)\n\n # Copy file to repository\n shutil.copy2(self.response,filename2)\n\n # insert info in database\n repository = DataRepository(hash=h.hexdigest(), creation_date=file_date.today(), original_file = filename2, package_file=self.response)\n session.add(repository)\n session.commit()\n\n #log.warning('File inserted')\n self.log('File inserted')\n\n # Remove other file\n os.remove(self.response)\n\n self.response = filename2\n\n return False", "def process_file(self, filename):\n import math\n try:\n my_file = open(filename, \"r\")\n except FileNotFoundError:\n return False\n\n for next_line in my_file:\n my_tuple = tuple(next_line.split(\",\"))\n if my_tuple[0].isdigit() and my_tuple[3] == 'TEMP':\n time_of_day = math.floor(float(my_tuple[1]) * 24)\n temp = my_tuple[4].rstrip()\n new_tuple = (int(my_tuple[0]), time_of_day, int(my_tuple[2]), float(temp))\n self._data_set.append(new_tuple)\n return True", "def test_from_file_csv(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.csv')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def import_excel(self, filepath_excel,database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO render_information (\n object_type,\n name,\n radius,\n polar_angle_min,\n polar_anglel_max,\n polar_angle_segments,\n polar_angle_random_rad,\n azimuth_angle_min,\n azimuth_angle_max,\n azimuth_angle_segments,\n azimuth_angle_random_rad,\n tracking_obj,\n segmentation\n )\n VALUES (\n :object_type,\n :name,\n :radius,\n :polar_angle_min,\n :polar_anglel_max,\n :polar_angle_segments,\n :polar_angle_random_rad,\n :azimuth_angle_min,\n :azimuth_angle_max,\n :azimuth_angle_segments,\n :azimuth_angle_random_rad,\n :tracking_obj,\n :segmentation\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"render data addet from excel file\")\n except :\n print(\"adding render data from excel file failed\")\n\n elif database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO object_information (\n obj_filepath,\n obj_name,\n obj_type,\n obj_scale_factor,\n obj_type,\n obj_location_x,\n obj_location_y,\n obj_location_z,\n obj_rotation_x,\n obj_rotation_y,\n obj_rotation_z,\n obj_amount_percent,\n obj_material_path,\n obj_point_in_time,\n maximum_random_rotation_degree_z,\n maximum_random_translation,\n random_amount\n )\n VALUES (\n :obj_filepath,\n :obj_name,\n :obj_type,\n :obj_scale_factor,\n :obj_type,\n :obj_location_x,\n 
:obj_location_y,\n :obj_location_z,\n :obj_rotation_x,\n :obj_rotation_y,\n :obj_rotation_z,\n :obj_amount_percent,\n :obj_material_path,\n :obj_point_in_time,\n :maximum_random_rotation_degree_z,\n :maximum_random_translation,\n :random_amount\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n print(csv_reader_object)\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"object data added from excel file\")\n except :\n print(\"adding object data from excel file failed\")\n\n else:\n print(\"no Database found, maybe check spelling in method call??\")\n return", "def import_file(filepath, db):\n # Logging\n log_main = logging.getLogger(__name__)\n log_import = log_main.getChild('import_files')\n log_import = log_import.getChild(filepath.split('/')[-1])\n log_import.info('started')\n start = time()\n\n # Variables used in data processing\n memory_buff = StringIO()\n curr = None\n cols = ['tweetID', 'date', 'message', 'username', 'userID', 'language',\n 'longitude', 'latitude', 'retweet']\n sql = \"\"\"COPY \"raw_tweets\" (\"tweetID\", \"date\", \"message\", \"username\", \"userID\", \"language\", \"longitude\", \"latitude\", \"retweet\") \n FROM STDIN \n WITH (FORMAT CSV, HEADER TRUE, DELIMITER '\\t');\n \"\"\"\n \n # Try reading the file\n try:\n df = pd.read_csv(filepath, \n usecols=cols, engine='c', \n memory_map=True, low_memory=False,\n dtype={'userID': np.int64, 'tweetID': np.int64})\n except Exception as e:\n log_import.warn('error on read_csv')\n memory_buff.close()\n print (e)\n return\n\n # Attempt to open up a connection to database.\n try:\n connn = db.connect()\n conn = db.raw_connection()\n curr = conn.cursor()\n except (Exception) as e:\n log_import.warn('error on server connection')\n memory_buff.close()\n if curr is not None:\n curr.close()\n print (e)\n return\n\n # Try copying the files to table.\n try:\n # Save to our buffer\n df[cols].to_csv(memory_buff, sep='\\t',\n header=True, index=False, encoding='utf-8')\n\n # Point buffer to start of memory block\n memory_buff.seek(0)\n\n # Copy records using native Postgres COPY command (FAST)\n curr.copy_expert(sql, memory_buff)\n\n # Save transaction and commit to DB\n conn.commit()\n except (Exception) as e:\n log_import.warn('error while copying to database')\n memory_buff.close()\n if curr is not None:\n curr.close()\n print (e)\n return\n finally:\n memory_buff.close()\n if curr is not None:\n curr.close()\n log_import.info('finished ({:.2f})'.format(time() - start))\n return", "def _process_data_file(self, manifest_row):\n # get the file object for the data\n csv_reader = DataReader(meta=self.meta,\n manifest_row=manifest_row,\n load_from=\"file\")\n\n # get file path for storing clean PSV files\n temp_filepath = self._get_temp_filepath(manifest_row=manifest_row)\n\n # validate and clean\n self._load_single_file(table_name=manifest_row['destination_table'],\n manifest_row=manifest_row,\n csv_reader=csv_reader,\n temp_filepath=temp_filepath)", "def test_from_file_xlsx(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xlsx')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def _check_duplicate_id_csv(self):\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, 
csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True", "def copy_from_file(conn, df, table):\n # Save the dataframe to disk\n tmp_df = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"tmp_dataframe.csv\")\n df.to_csv(tmp_df, index=False, header=False)\n f = open(tmp_df, 'r')\n cursor = conn.cursor()\n try:\n cursor.copy_from(file=f, table=table, sep=\",\", columns=('event_number', 'priority', 'address', 'is_incident',\n 'geometry_wkt', 'timestamp', 'disposition'))\n cursor.execute(\n f\"DELETE FROM {table} A USING {table} B WHERE A.ctid < B.ctid AND A.event_number = B.event_number AND A.priority = B.priority AND A.address = B.address AND A.is_incident = B.is_incident AND A.geometry_wkt = B.geometry_wkt AND A.timestamp = B.timestamp AND A.disposition = B.disposition\")\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n os.remove(tmp_df)\n logger.error(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n logger.info(\"copy_from_file() done\")\n cursor.close()\n os.remove(tmp_df)", "def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df", "def upload_sheet(self, request):\n file = self.request.data['file']\n\n # validating requested payload.\n if not file:\n return Response(\"Got no file! Please hit me again with file.\")\n # Only .csv/xls format file are allowed\n if file.name.rsplit('.')[1] == 'csv':\n sheet_as_df = pd.read_csv(file)\n elif file.name.rsplit('.')[1] == 'xls':\n sheet_as_df = pd.read_excel(file)\n else:\n return Response(\"Only .csv/.xls format type allowed for now.\")\n\n # sheet uploading code\n # =============Logic Start================\n header = ['last_name', 'first_name', 'state', 'phone_number']\n df = sheet_as_df\n if not set(header).issubset(df.columns):\n return False, f'Please check uploading sheet matching headers as: {header}'\n # filling empty(NaN) of data-frame entry with 0.0\n df = df.fillna(0)\n from itertools import islice\n batch_size = 100\n while True:\n content_instance = [Content(\n first_name=record['first_name'],\n last_name=record['last_name'],\n state=record['state'],\n phone_number=record['phone_number']\n ) for record in islice(df.to_dict('records'), batch_size)]\n if not content_instance:\n logger.info('Unable to update PhoneBook model with entries.')\n break\n PhoneBook.objects.bulk_create(content_instance, batch_size)\n # =============Logic End==================\n\n return Response('Successfully updated order entry!')" ]
[ "0.55844575", "0.54056644", "0.53893214", "0.53867716", "0.5307293", "0.5246726", "0.5222508", "0.5163896", "0.5116722", "0.509766", "0.509731", "0.5044055", "0.5019669", "0.4979874", "0.49676162", "0.49665996", "0.4965659", "0.49415362", "0.49070588", "0.487215", "0.48677886", "0.4840013", "0.48059613", "0.4801619", "0.479827", "0.47943518", "0.47663924", "0.4752241", "0.47052962", "0.46718183" ]
0.6357831
0
Decorator for class methods that catches errors and displays a dialog box on success or error. Checks whether the wrapped callable is a bound method so the dialog box gets the proper parent widget.
def errorCheck(success_text=None, error_text="Error!", logging=True, show_traceback=False, skip=False):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if inspect.ismethod(func):
                # use the bound instance (if any) as the dialog's parent widget
                self = args[0]
            else:
                self = None
            if skip:
                return func(*args, **kwargs)
            try:
                # run the wrapped callable first, show the success dialog,
                # then hand the result back to the caller
                result = func(*args, **kwargs)
                if success_text:
                    success_dialog = QtWidgets.QMessageBox(self)
                    success_dialog.setText(success_text)
                    success_dialog.setWindowModality(QtCore.Qt.WindowModal)
                    success_dialog.exec()
                return result
            except Exception as e:
                error_dialog = QtWidgets.QMessageBox(self)
                error_dialog.setWindowModality(QtCore.Qt.WindowModal)
                error_dialog.setText(error_text)
                if logging:
                    logger.exception(traceback.format_exc())
                if show_traceback:
                    error_dialog.setInformativeText(traceback.format_exc())
                else:
                    error_dialog.setInformativeText(str(e))
                error_dialog.exec()
        return wrapper
    return decorator
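A minimal usage sketch on a Qt widget method; the window class, the slot, and the `current_path`/`editor` attribute names are hypothetical — the sketch only shows how the decorator above would typically be applied.

class MainWindow(QtWidgets.QMainWindow):
    @errorCheck(success_text="File saved.", error_text="Could not save file!")
    def save_file(self):
        # any exception raised here is caught and shown in a modal
        # QMessageBox; on success a confirmation dialog appears instead
        with open(self.current_path, "w") as fh:
            fh.write(self.editor.toPlainText())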
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_handler(call_on_errors):\n assert callable(call_on_errors)\n def entangle(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except InputInvalidException:\n return call_on_errors(self, *args, **kwargs)\n return wrapper\n return entangle", "def handle_errors_nicely(func):\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except click.ClickException as e:\n # Let click handle this, it'll show the relevant erroe message and\n # exit with a non-zero code.\n raise\n except:\n click.echo(\"unknown error\")\n # TODO: don't just blindly raise! Deal with it better\n raise\n\n return wrapped", "def error(self, *args, **kwargs):", "def __call__(self, *args, **kwargs):\r\n return self.error(*args, **kwargs)", "def _show_traceback(method):\n def m(self, *args, **kwargs):\n try:\n return(method(self, *args, **kwargs))\n except Exception as e:\n ip = get_ipython()\n if ip is None:\n self.log.warning(\"Exception in widget method %s: %s\", method, e, exc_info=True)\n else:\n ip.showtraceback()\n return m", "def standard_error_handler(error_function):\n\n async def wrapper(cls, ctx, error):\n\n extra = f\"\\n\\nSee the help message for more information.\"\n\n # This prevents any commands with local handlers being handled here\n if hasattr(ctx.command, \"on_error\"):\n return\n\n # Allows us to check for original exceptions raised and sent to CommandInvokeError.\n # If nothing is found. We keep the exception passed to on_command_error.\n error = getattr(error, \"original\", error)\n\n ignored = (commands.CommandNotFound,)\n\n # Anything in ignored will return and prevent anything happening.\n if any([isinstance(error, i) for i in ignored]):\n return\n\n if isinstance(error, DisabledCommand):\n await pretty_print(\n ctx, \"This command is disabled!\", title=\"Error\", color=ERROR_COLOR\n )\n\n elif isinstance(error, MemberNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, RoleNotFound):\n await pretty_print(\n ctx,\n str(error) + \"\\nNote: this command is case-sensitive.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, NoPrivateMessage):\n await pretty_print(\n ctx,\n \"This command cannot be run in a private message.\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, PrivateMessageOnly):\n try:\n await ctx.message.delete()\n extra += \"\\nYour message has been deleted\"\n except:\n print(\"Could not delete message\")\n await pretty_print(\n ctx,\n \"This command should be run in a Private Message only!\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, MissingRole):\n await pretty_print(\n ctx, str(error) + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, IllegalRole):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, CheckFailure):\n await pretty_print(\n ctx,\n \"Could not run command, do you have sufficient permissions in this channel?\"\n + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, BadArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Could not run command, is it formatted properly?\" + extra,\n title=\"Error\",\n color=ERROR_COLOR,\n )\n 
return\n\n elif isinstance(error, MissingRequiredArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx, \"Missing required arguments\", title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, BadUnionArgument):\n await ctx.send_help(ctx.command)\n await pretty_print(\n ctx,\n \"Invalid argument\",\n title=\"Error\",\n color=ERROR_COLOR,\n )\n return\n\n elif isinstance(error, WalletNotVerified):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, InvalidCoin):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n\n elif isinstance(error, RequestError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n elif isinstance(error, FatalError):\n await pretty_print(\n ctx, error.message + extra, title=\"Error\", color=ERROR_COLOR\n )\n return\n await error_function(cls, ctx, error)\n\n return wrapper", "def exsafe(func):\n error_msg_template=\"{{}} executing function '{}':\".format(func.__name__)\n @func_utils.getargsfrom(func,hide_outer_obj=True) # PyQt slots don't work well with bound methods\n def safe_func(*args, **kwargs):\n with exint(error_msg_template=error_msg_template):\n return func(*args,**kwargs)\n return safe_func", "def error_aware(method):\n\n def _request(request_handler, *args):\n \"\"\"Surround request_handler.method(*args) with try/except for errors.\n\n Args:\n request_handler: Request handler which method is being called.\n \"\"\"\n try:\n method(request_handler, *args)\n except Error, error:\n response_body = {\n 'error': {\n 'status': error.code,\n 'message': error.message\n }\n }\n request_handler.response.clear()\n request_handler.response.set_status(error.code)\n util.write_response(request_handler, response_body)\n return _request", "def setErrorDelegate(self, func):\r\n # Assign the user function to the internal callback handle\r\n self.errorDelegate = func", "def error(self, func):\n self.error_handler = func\n return func", "def wrapper_view_error(\n view: Any = None, class_exception: Any = None, status: int = None\n) -> Any:\n\n def _decorate(function):\n @functools.wraps(function)\n def wrapped_function(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except class_exception as obj_exception:\n return Response(data={\"error\": obj_exception.message}, status=status)\n\n return wrapped_function\n\n if view:\n return _decorate(view)\n return _decorate", "def _ui_method(self, method):\n raise NotImplementedError()", "def convert_libvirt_errors(method):\n def wrapper(self, *args, **kw):\n try:\n return method(self, *args, **kw)\n except libvirt.libvirtError as ex:\n code = ex.get_error_code()\n exstr = str(ex).lower()\n if code == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT:\n err = \"snapshot_not_found\"\n msg = \"snapshot {0!r} not found for {1!r}\".format(args[0], self.name)\n elif \"domain is already running\" in exstr:\n # code == libvirt.VIR_ERR_OPERATION_INVALID\n err = \"vm_online\"\n msg = \"vm {0!r} is already running\".format(self.name)\n elif \"domain is not running\" in exstr:\n # code == libvirt.VIR_ERR_OPERATION_INVALID\n err = \"vm_offline\"\n msg = \"vm {0!r} is not running\".format(self.name)\n elif re.search(r\"snapshot file for disk \\S+ already exists\", exstr) or \\\n re.search(r\"domain snapshot \\S+ already exists\", exstr):\n # code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED or libvirt.VIR_ERR_INTERNAL_ERROR\n err = \"snapshot_exists\"\n 
msg = \"snapshot {0!r} already exists for {1!r}\".format(args[0], self.name)\n else:\n raise LVPError(\"unexpected libvirt error: {0.__class__.__name__}: {0}\".format(ex), code=code) # pylint: disable=E1306\n\n if err not in getattr(method, \"ignore_libvirt_errors\", []):\n raise LVPError(msg, code=code)\n\n wrapper.__doc__ = method.__doc__\n wrapper.__name__ = method.__name__\n return wrapper", "def errors_wrapped(func):\n\n async def wrapped(self, *args, **kwargs):\n try:\n return await func(self, *args, **kwargs)\n except AuthenticationRequiredError as ex:\n logger.warning(f\"Trying to use unauth access: {ex}\")\n add_message(self.request, \"LogIn to continue.\")\n redirect(self.request, \"sign_in\")\n\n except BaseApplicationError as ex:\n message = getattr(ex, \"message\", None) or str(ex)\n details = getattr(ex, \"details\", None)\n if details:\n message = f\"{message}: {details}\"\n\n add_message(self.request, message, kind=\"error\")\n raise web.HTTPFound(self.request.path)\n\n return wrapped", "def handle_errors(func):\n def wrapper(*args, **kwargs):\n try:\n response = func(*args, **kwargs)\n except Exception as e:\n response = jsonify({\"success\": False, \"message\": str(e)})\n return response\n wrapper.func_name = func.func_name\n return wrapper", "def authentication_required(meth):\n\n def check(cls, *args, **kwargs):\n if cls.authenticated:\n return meth(cls, *args, **kwargs)\n raise Error(\"Authentication required\")\n\n return check", "def error(self, message=None, show_help=True):", "def decorator(func):\n func.error_message = msg\n return func", "def _on_error(self, type, value, tb):\n \n # get exception\n exception = traceback.format_exception(type, value, tb)\n exception = '\\n'.join(exception)\n \n # show error message\n dlg = ErrorDlg(self, exception)\n dlg.ShowModal()\n dlg.Destroy()", "def catching(self, error) -> \"fn\":\n return self._mod.catching(error, self)", "def dlg_validate(self):\n return(True) # override", "def handle_fb_error():\n def deco_handle(f):\n def f_handle(*args, **kwargs):\n self = args[0]\n try:\n return f(*args, **kwargs)\n except:\n this_exception = sys.exc_info()\n status_msg = None\n try:\n # don't wait long, the status msg should be there already\n self.driver.implicitly_wait(1)\n status_msg=self.driver.find_element_by_class_name('status-msg')\n raise AssertionError('found fb status-msg: %s' % status_msg.text)\n except:\n # if it has info, re-raise\n if status_msg:\n if len(status_msg.text) > 0:\n raise\n # we didn't find a status_msg, just re-raise the original\n raise this_exception[1], None, this_exception[2]\n return f_handle\n return deco_handle", "def error(self):\n ...", "def decorator_func(func):\r\n @functools.wraps(func)\r\n def with_status_check(obj, *args, **kwargs):\r\n if obj.status not in valid_start_statuses:\r\n exception_msg = (\r\n u\"Error calling {} {}: status is '{}', must be one of: {}\"\r\n ).format(func, obj, obj.status, valid_start_statuses)\r\n raise VerificationException(exception_msg)\r\n return func(obj, *args, **kwargs)\r\n\r\n return with_status_check", "def error(self, handler):\n pass", "def indicate_error(\n self, *args,\n mean=None, means=None, median=None, medians=None,\n barstd=None, barstds=None, barpctile=None, barpctiles=None, bardata=None,\n boxstd=None, boxstds=None, boxpctile=None, boxpctiles=None, boxdata=None,\n shadestd=None, shadestds=None, shadepctile=None, shadepctiles=None, shadedata=None,\n fadestd=None, fadestds=None, fadepctile=None, fadepctiles=None, fadedata=None,\n boxmarker=None, 
boxmarkercolor='white',\n boxcolor=None, barcolor=None, shadecolor=None, fadecolor=None,\n shadelabel=False, fadelabel=False, shadealpha=0.4, fadealpha=0.2,\n boxlinewidth=None, boxlw=None, barlinewidth=None, barlw=None, capsize=None,\n boxzorder=2.5, barzorder=2.5, shadezorder=1.5, fadezorder=1.5,\n **kwargs\n):\n method = kwargs.pop('_method')\n name = method.__name__\n bar = name in ('bar',)\n flip = name in ('barh', 'plotx', 'scatterx') or kwargs.get('vert') is False\n plot = name in ('plot', 'scatter')\n violin = name in ('violinplot',)\n means = _not_none(mean=mean, means=means)\n medians = _not_none(median=median, medians=medians)\n barstds = _not_none(barstd=barstd, barstds=barstds)\n boxstds = _not_none(boxstd=boxstd, boxstds=boxstds)\n shadestds = _not_none(shadestd=shadestd, shadestds=shadestds)\n fadestds = _not_none(fadestd=fadestd, fadestds=fadestds)\n barpctiles = _not_none(barpctile=barpctile, barpctiles=barpctiles)\n boxpctiles = _not_none(boxpctile=boxpctile, boxpctiles=boxpctiles)\n shadepctiles = _not_none(shadepctile=shadepctile, shadepctiles=shadepctiles)\n fadepctiles = _not_none(fadepctile=fadepctile, fadepctiles=fadepctiles)\n bars = any(_ is not None for _ in (bardata, barstds, barpctiles))\n boxes = any(_ is not None for _ in (boxdata, boxstds, boxpctiles))\n shade = any(_ is not None for _ in (shadedata, shadestds, shadepctiles))\n fade = any(_ is not None for _ in (fadedata, fadestds, fadepctiles))\n if means and medians:\n warnings._warn_proplot('Cannot have both means=True and medians=True. Using former.') # noqa: E501\n\n # Get means or medians while preserving metadata for autoformat\n # TODO: Permit 3D array with error dimension coming first\n # NOTE: Previously went to great pains to preserve metadata but now retrieval\n # of default legend handles moved to _auto_format_1d so can strip.\n x, y, *args = args\n data = y\n if means or medians:\n if data.ndim != 2:\n raise ValueError(f'Expected 2D array for means=True. 
Got {data.ndim}D.')\n if not any((bars, boxes, shade, fade)):\n bars = barstds = True\n if violin:\n boxes = boxstds = True\n if means:\n y = np.nanmean(data, axis=0)\n elif medians:\n y = np.nanpercentile(data, 50, axis=0)\n\n # Parse keyword args and apply defaults\n # NOTE: Should not use plot() 'linewidth' for bar elements\n # NOTE: violinplot_extras passes some invalid keyword args with expectation\n # that indicate_error pops them and uses them for error bars.\n getter = kwargs.pop if violin else kwargs.get if bar else lambda *args: None\n boxmarker = _not_none(boxmarker, True if violin else False)\n capsize = _not_none(capsize, 3.0)\n linewidth = _not_none(getter('linewidth', None), getter('lw', None), 1.0)\n barlinewidth = _not_none(barlinewidth=barlinewidth, barlw=barlw, default=linewidth)\n boxlinewidth = _not_none(boxlinewidth=boxlinewidth, boxlw=boxlw, default=4 * barlinewidth) # noqa: E501\n edgecolor = _not_none(getter('edgecolor', None), 'k')\n barcolor = _not_none(barcolor, edgecolor)\n boxcolor = _not_none(boxcolor, barcolor)\n shadecolor_infer = shadecolor is None\n fadecolor_infer = fadecolor is None\n shadecolor = _not_none(shadecolor, kwargs.get('color'), kwargs.get('facecolor'), edgecolor) # noqa: E501\n fadecolor = _not_none(fadecolor, shadecolor)\n\n # Draw dark and light shading\n getter = kwargs.pop if plot else kwargs.get\n eobjs = []\n fill = self.fill_betweenx if flip else self.fill_between\n if fade:\n edata, label = _get_error_data(\n data, y, errdata=fadedata, stds=fadestds, pctiles=fadepctiles,\n stds_default=(-3, 3), pctiles_default=(0, 100), absolute=True,\n reduced=means or medians, label=fadelabel,\n )\n eobj = fill(\n x, *edata, linewidth=0, label=label,\n color=fadecolor, alpha=fadealpha, zorder=fadezorder,\n )\n eobjs.append(eobj)\n if shade:\n edata, label = _get_error_data(\n data, y, errdata=shadedata, stds=shadestds, pctiles=shadepctiles,\n stds_default=(-2, 2), pctiles_default=(10, 90), absolute=True,\n reduced=means or medians, label=shadelabel,\n )\n eobj = fill(\n x, *edata, linewidth=0, label=label,\n color=shadecolor, alpha=shadealpha, zorder=shadezorder,\n )\n eobjs.append(eobj)\n\n # Draw thin error bars and thick error boxes\n sy = 'x' if flip else 'y' # yerr\n ex, ey = (y, x) if flip else (x, y)\n if boxes:\n edata, _ = _get_error_data(\n data, y, errdata=boxdata, stds=boxstds, pctiles=boxpctiles,\n stds_default=(-1, 1), pctiles_default=(25, 75),\n reduced=means or medians,\n )\n if boxmarker:\n self.scatter(\n ex, ey, s=boxlinewidth, marker='o', color=boxmarkercolor, zorder=5\n )\n eobj = self.errorbar(\n ex, ey, color=boxcolor, linewidth=boxlinewidth, linestyle='none',\n capsize=0, zorder=boxzorder, **{sy + 'err': edata}\n )\n eobjs.append(eobj)\n if bars: # now impossible to make thin bar width different from cap width!\n edata, _ = _get_error_data(\n data, y, errdata=bardata, stds=barstds, pctiles=barpctiles,\n stds_default=(-3, 3), pctiles_default=(0, 100),\n reduced=means or medians,\n )\n eobj = self.errorbar(\n ex, ey, color=barcolor, linewidth=barlinewidth, linestyle='none',\n markeredgecolor=barcolor, markeredgewidth=barlinewidth,\n capsize=capsize, zorder=barzorder, **{sy + 'err': edata}\n )\n eobjs.append(eobj)\n\n # Call main function\n # NOTE: Provide error objects for inclusion in legend, but *only* provide\n # the shading. 
Never want legend entries for error bars.\n xy = (x, data) if violin else (x, y)\n kwargs.setdefault('_errobjs', eobjs[:int(shade + fade)])\n res = obj = method(self, *xy, *args, **kwargs)\n\n # Apply inferrred colors to objects\n i = 0\n if isinstance(res, tuple): # pull out patch from e.g. BarContainers\n obj = res[0]\n for b, infer in zip((fade, shade), (fadecolor_infer, shadecolor_infer)):\n if not b or not infer:\n continue\n if hasattr(obj, 'get_facecolor'):\n color = obj.get_facecolor()\n elif hasattr(obj, 'get_color'):\n color = obj.get_color()\n else:\n color = None\n if color is not None:\n eobjs[i].set_facecolor(color)\n i += 1\n\n # Return objects\n # NOTE: For now 'errobjs' can only be returned with 1D y coordinates\n # NOTE: Avoid expanding matplolib collections that are list subclasses here\n if not eobjs:\n return res\n elif isinstance(res, tuple) and not isinstance(res, mcontainer.Container):\n return ((*res, *eobjs),) # for plot()\n else:\n return (res, *eobjs)", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def jsonable_error(status=500, message=\"The Studio servers encountered an error\"):\r\n def outer(func):\r\n @functools.wraps(func)\r\n def inner(request, *args, **kwargs):\r\n if request.is_ajax():\r\n content = json.dumps({\"error\": message})\r\n return HttpResponse(content, content_type=\"application/json\",\r\n status=status)\r\n else:\r\n return func(request, *args, **kwargs)\r\n return inner\r\n return outer", "def _CatchExceptionDecorator(method):\n @functools.wraps(method)\n def Wrap(*args, **kwargs):\n try:\n return method(*args, **kwargs)\n except Exception:\n logging.warning(\n '%s Exception: %s.', name,\n '\\n'.join(traceback.format_exception_only(\n *sys.exc_info()[:2])).strip())\n return Wrap if enable else method", "def checkIsValid(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n if self.validator.isValid:\n return f(self, *args, **kwargs)\n else:\n error = self.validator._exceptionClass('Called: {} method before data validated'.format(f.__name__))\n self.validator._errors[f.__name__] = error\n if self.validator._errorHandler is not None:\n self.validator._errorHandler(error, self.getValidationContext())\n return\n\n return wrapper" ]
[ "0.60215515", "0.5855564", "0.58513117", "0.5837953", "0.57659924", "0.5627547", "0.5543959", "0.5534907", "0.5445549", "0.54414475", "0.5429948", "0.53747135", "0.53649336", "0.53635114", "0.53361785", "0.53337044", "0.5322622", "0.53081447", "0.52866286", "0.51993626", "0.5182949", "0.5177273", "0.51723087", "0.5168871", "0.51671386", "0.51435435", "0.5128379", "0.5126035", "0.5121002", "0.50983846" ]
0.61921567
0
Normalize the weight vector: negative weights are set to zero, and the whole vector sums to 1.0.
def normalize_weights(self):
    # Set negative weights to zero, then normalize so the vector sums to one.
    weights = np.asarray(self._weights, dtype=float)
    weights[weights < 0] = 0.0
    total = weights.sum()
    if total == 0.0:
        # If the weights are all zeros, set each weight to 1/k, where k is
        # the number of components.
        weights = np.full(len(weights), 1.0 / len(weights))
    else:
        weights = weights / total
    self._weights = np.round(weights, 3)
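The same rule on plain lists, as a self-contained sketch (the helper name is hypothetical):

import numpy as np

def normalize(weights, ndigits=3):
    # Clip negatives to zero; fall back to a uniform 1/k vector when the
    # clipped weights sum to zero; otherwise divide by the sum.
    w = np.clip(np.asarray(weights, dtype=float), 0.0, None)
    total = w.sum()
    if total == 0.0:
        w = np.full(len(w), 1.0 / len(w))
    else:
        w = w / total
    return np.round(w, ndigits)

print(normalize([0.2, -0.1, 0.6]))  # [0.25 0.   0.75]
print(normalize([0.0, 0.0]))        # [0.5 0.5]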
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def normalize_weights(self, w):\n n = w.astype(np.float64, copy=True)\n c = float(np.sum(w))\n n /= c\n return n", "def normalize(self):\n self.vector /= np.linalg.norm(self.vector)", "def normalize(my_vector):\n my_vector = np.array(my_vector)\n size = len(my_vector)\n\n sum_ = sum(my_vector)\n if sum_ != 0.0:\n for i in range(size):\n my_vector[i] = my_vector[i] / sum_\n return my_vector", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def normalize(self):\n\t\tnorm = self.norm()\n\t\tif norm == 0:\n\t\t\traise ValueError(\"Can't normalize zero vector\")\n\t\treturn self / norm", "def normalize(vector):\n return vector / np.linalg.norm(vector)", "def normalize(w):\n s = sum(w)\n for i in range(len(w)):\n w[i] /= s\n return w", "def normalize(v):\n return v / np.linalg.norm(v)", "def normalize_vector (vector ):\r\n\r\n if (np.sum (vector ) == 0):\r\n #print (\"In normalize_vector: Vector is 0. Returning input vector.\")\r\n return vector\r\n\r\n return vector / np.linalg.norm(vector)", "def normalizeVector(v):\n normalizer = 1.0 / sum(v)\n\n normalized = [i * normalizer for i in v]\n return normalized", "def normalize(v):\n return np.array(v) / np.linalg.norm(v)", "def _mutate_weights(self, weights):\n return weights + normal(loc=0, scale=self.standard_deviation, size=weights.shape[0])", "def normalize_weight(self, Z):\n self.weight /= Z", "def normalize(w: torch.Tensor):\n\n if w.dim() > 1:\n return _matrix(w)\n\n return _vector(w)", "def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def ReWeight(Vec):\n Out = Vec\n Exclude = isnan(Vec)\n Out[Exclude] = 0 #set missing to 0\n Out = Out / sum(Out) #normalize\n return(Out)", "def _normalize(weights, axis, log=True):\n if log:\n normalizer = tf.reduce_logsumexp(weights, axis=axis, keepdims=True)\n return weights - normalizer\n normalizer = tf.reduce_sum(weights, axis=axis)\n return weights / normalizer", "def normalize_weights(w, dims=(0,), bias=1e-5):\n with tf.name_scope('normalization'):\n return w / (tf.sqrt(tf.reduce_sum(tf.square(w), dims, keep_dims=True) + bias))", "def normBySum(vector):\n\treturn np.divide(vector,float(sum(vector)))", "def normalize(v):\n\tdim = v.shape \n\tfor i in range(0, dim[0]-1):\n\t\tv[i,:,:] = (v[i,:,:].T/np.sum(v[i,:,:],1)).T\n\n\treturn v", "def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)", "def normalize(vec):\n return vec / length(vec)", "def normalize(x):\r\n return x/norm(x)", "def normalize(self, vec):\n length = math.sqrt( vec[0,0]*vec[0,0] + vec[0,1]*vec[0,1] + vec[0,2]*vec[0,2] )\n vnorm = vec / length\n return vnorm", "def normalize(v):\n\n return v * (1.0 / magnitude(v))", "def clip_normalize(w):\n w_clip = np.clip(w, 0, np.inf)\n return w_clip / np.sum(w_clip, axis=0)", "def normalize(self):\n self._vectors = [vector.normalized() for vector in self._vectors]", "def normalize_weights(self, labels, weights):\n if self._ragged:\n labels, _, weights, _ = 
utils.ragged_to_dense(labels, None, weights)\n return self._normalize_weights_impl(labels, weights)" ]
[ "0.81079364", "0.789293", "0.7727452", "0.77199495", "0.76464504", "0.7522993", "0.7492114", "0.74709684", "0.7452147", "0.74076146", "0.73972994", "0.7382311", "0.72835106", "0.7223835", "0.72054964", "0.71810776", "0.7152016", "0.7151621", "0.71274734", "0.7124058", "0.70973843", "0.70828205", "0.70742863", "0.70547247", "0.70410454", "0.6999866", "0.6985105", "0.6979914", "0.69621617", "0.6959164" ]
0.8004195
1
Returns the path where the .NET Framework 2.0 SDK is installed.
def _getNETSDKPath():
    try:
        key = win32api.RegOpenKeyEx(
            win32con.HKEY_LOCAL_MACHINE,
            'SOFTWARE\\Microsoft\\Microsoft SDKs\\.NETFramework\\v2.0',
            0, win32con.KEY_READ)
    except win32api.error:
        return ''
    try:
        i = 0
        while True:
            name, value, _type = win32api.RegEnumValue(key, i)
            if name == 'InstallationFolder':
                return value
            i += 1
    except win32api.error:
        # Enumeration ran past the last value without finding the folder.
        return ''
    finally:
        win32api.RegCloseKey(key)
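A hedged sketch of calling the helper and falling back when the SDK is absent (assumes pywin32 on Windows; the tool path below is illustrative):

import os

sdk_dir = _getNETSDKPath()  # helper defined above
if sdk_dir:
    # SDK tools such as tlbimp.exe live under the bin directory.
    tlbimp = os.path.join(sdk_dir, 'bin', 'tlbimp.exe')
    print('Using', tlbimp)
else:
    print('No .NET Framework 2.0 SDK registry entry found')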
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_windows_sdk_path():\n try:\n import _winreg as winreg\n except ImportError:\n import winreg\n sub_key = r\"Software\\Microsoft\\Microsoft SDKs\\Windows\"\n with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key) as key:\n name = \"CurrentInstallFolder\"\n return winreg.QueryValueEx(key, name)[0]\n return None", "def GetPath(self):\n sdk_root = config.Paths().sdk_root\n if not sdk_root:\n raise NoSdkRootError()\n return os.path.join(sdk_root, self.name)", "def get_sdk_path(cls):\n pass", "def get_nt2_dir() :\n try :\n d = os.environ[\"NT2_DIR\"]\n except KeyError :\n d = sys.path[0] \n if not d : d = os.getcwd()\n try :\n d1 = re.match(\"(.*/)nt2/\",d).groups()\n except :\n print \"\\n Environment chain 'NT2_DIR' is not defined\"\n print \"and your current path does not contain /nt2/\"\n print \"sorry aborting\"\n print get_nt2_dir.__doc__\n raise SystemExit\n if len(d1)!=0 : d = d1[0]\n return os.path.expanduser(os.path.join(d,\"nt2/include/\"))", "def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")", "def path(self):\n installed_packages_folder_path = site.getsitepackages()[0]\n return f'{installed_packages_folder_path}/{SITE_PACKAGES_FOLDER_NAME}'", "def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret", "def syspath():\n import sys\n pprint(sys.path)", "def _GetLibraryPath(self, platform, backupPlatform=''):\n if platform == Environment.GetPlatform() or \\\n (backupPlatform and backupPlatform == Environment.GetPlatform()):\n return os.path.split(self._libraryPath)[1]\n return ''", "def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"", "def GetAndroidSdkPath():\n return _GetFilePath(FLAGS.android_sdk_path)", "def get_python_path():\n\n return get_executable_path('python')", "def determine_python_path():\n if git_install_requested():\n projects_yaml = config('openstack-origin-git')\n projects_yaml = git_default_repos(projects_yaml)\n return os.path.join(git_pip_venv_dir(projects_yaml),\n 'lib/python2.7/site-packages')\n else:\n return None", "def find_gae_sdk_gcloud():\n try:\n gcloud = find_gcloud()\n except BadEnvironmentError:\n return None\n # 'gcloud' is <sdk_root>/bin/gcloud.\n sdk_root = os.path.dirname(os.path.dirname(gcloud))\n return os.path.join(sdk_root, 'platform', 'google_appengine')", "def get_cfy_agent_path():\n\n return get_executable_path('cfy-agent')", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def path(cls):\n from os.path import sep, join, exists\n from os import environ\n return join(environ.get(\"SystemRoot\", join(\"C:\", sep, \"Windows\")), \"System32\", \"mpclaim.exe\")", "def CoreDirectory():\n thisDir=WindowsPath(__file__).parent.resolve()\n # print(f\"this dir {thisDir}\")\n coreDir=thisDir.parent/\"MacroSystem/core\"\n return coreDir", "def find_lib_path():\n curr_path = 
os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def _ensure_sdk(self, sdk_dir, sdk_version):\n with self.m.context(infra_steps=True):\n pkgs = self.m.cipd.EnsureFile()\n pkgs.add_package('chrome_internal/third_party/sdk/windows', sdk_version)\n self.m.cipd.ensure(sdk_dir, pkgs)\n return sdk_dir", "def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def get_appdir():\n\n return APP_PATH", "def get_installation_path():\n file_abs_path = os.path.abspath(__file__)\n real_file_abs_path = os.path.realpath(file_abs_path)\n return real_file_abs_path[:real_file_abs_path.find('/node')]", "def _locate_bootloader():\n pkg_path = os.path.dirname(__file__)\n blpath = os.path.abspath(os.path.join(pkg_path, 'bootloader'))\n if not os.path.isfile(blpath):\n raise InternalError(\"bootloader not found at {}\".format(blpath))\n return blpath", "def _get_mingw_dll_dir():\n gfortran_exe = shutil.which(\"gfortran\")\n if gfortran_exe is None:\n return None\n\n gfortran_exe = pathlib.Path(gfortran_exe)\n bin_dir = gfortran_exe.resolve().parent\n matches = list(bin_dir.glob(\"libgfortran*.dll\"))\n if len(matches) == 0:\n return None\n\n return str(bin_dir)", "def get_sdk_version() -> str:\n return definitions.get_sdk_version()" ]
[ "0.6635156", "0.6456769", "0.62163496", "0.614617", "0.602612", "0.5883742", "0.5740016", "0.5641935", "0.5618597", "0.5578395", "0.556722", "0.55422294", "0.5538291", "0.5534905", "0.5458584", "0.54463655", "0.5429792", "0.538674", "0.53390664", "0.53347325", "0.5324747", "0.5308394", "0.5295824", "0.528417", "0.5268813", "0.5268232", "0.5249346", "0.5233713", "0.5214236", "0.5211446" ]
0.7449213
0
Add Builders and construction variables for tlbimp to an Environment.
def generate(env):
    if not exists(env):
        return 0

    TLBImpBuilder = env.Builder(
        action=SCons.Action.Action(
            TLBImpGenerator,
            generator=1,
            # cmdstr="$TLBIMPCOMSTR"
        ),
        src_suffix='.dll',
        target_suffix='.dll')

    dotNETSDK = _getNETSDKPath()
    homedir = env.Dir(dotNETSDK)
    bindir = homedir.Dir('bin')

    env['TLBIMP'] = 'tlbimp.exe'
    env['TLBIMPFLAGS'] = '/nologo /silent /strictref:nopia'
    env['TLBIMPCOMSTR'] = '[.NET] TLBIMP: Generating interop assembly for typelib in: $SOURCE to: $TARGET'
    env['BUILDERS']['TLBImp'] = TLBImpBuilder

    # Prepend the tlbimp directory to the PATH.
    env.PrependENVPath('PATH', bindir.abspath)
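A hypothetical SConstruct fragment using the builder registered above (the tool registration name and file names are made up):

# SConstruct (sketch) -- assumes this module is installed as an SCons tool
# named 'tlbimp' so that generate(env) runs when the Environment is built.
env = Environment(tools=['default', 'tlbimp'])
# Produce an interop assembly from the type library embedded in MyLib.dll.
interop = env.TLBImp('Interop.MyLib.dll', 'MyLib.dll')
env.Alias('interop', interop)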
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetupEnvironment(self):\n pass", "def initialize():\n environment = Environment()\n environment.setup()", "def _setup_environment_vars(self, opts):\n # Check that these directories actually exist\n assert os.path.isdir(opts.movie_advisor_home)\n\n #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)\n\n self.movie_advisor_home = opts.movie_advisor_home\n self.bento_home = opts.bento_home\n self.bento_tgz = opts.bento_tgz\n self.kiji_uri = \"kiji://.env/tutorial\"\n\n # \"express job\" takes a jar file as an argument\n assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))\n\n # Set the classpath for all of the commands that we'll run\n jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]\n for jar in jarsFullPaths: assert os.path.isfile(jar)\n\n classpath = \":\".join(jarsFullPaths)\n os.environ['KIJI_CLASSPATH'] = classpath\n\n if opts.show_classpath:\n print(\"export KIJI_CLASSPATH=%s\" % classpath)\n sys.exit(0)", "def setUpEnv(self):\n \n robot = Robot('atrv')\n\n pose = Sensor('pose')\n robot.append(pose)\n pose.configure_mw('yarp')\n\n motion = Actuator('v_omega')\n robot.append(motion)\n motion.configure_mw('yarp')\n \n env = Environment('indoors-1/indoor-1')\n env.configure_service('socket')", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)", "def setup(self):\n self.config = pau.IConfig\n self.session = pau.ISession\n pau.resolve(self)\n\n self.session.assets = Assets()\n self.config.db = self.db_name\n\n self.db = pau.IDb\n pau.resolve(self)\n\n # Instance\n i = Setup()\n pau.resolve(i)\n return i", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def BuildHooksEnv(self):\n args = {}\n if constants.BE_MINMEM in self.be_new:\n args[\"minmem\"] = self.be_new[constants.BE_MINMEM]\n if constants.BE_MAXMEM in self.be_new:\n args[\"maxmem\"] = self.be_new[constants.BE_MAXMEM]\n if constants.BE_VCPUS in self.be_new:\n args[\"vcpus\"] = self.be_new[constants.BE_VCPUS]\n # TODO: export disk changes. 
Note: _BuildInstanceHookEnv* don't export disk\n # information at all.\n\n if self._new_nics is not None:\n nics = []\n\n for nic in self._new_nics:\n n = copy.deepcopy(nic)\n nicparams = self.cluster.SimpleFillNIC(n.nicparams)\n n.nicparams = nicparams\n nics.append(NICToTuple(self, n))\n\n args[\"nics\"] = nics\n\n env = BuildInstanceHookEnvByObject(self, self.instance, override=args)\n if self.op.disk_template:\n env[\"NEW_DISK_TEMPLATE\"] = self.op.disk_template\n if self.op.runtime_mem:\n env[\"RUNTIME_MEMORY\"] = self.op.runtime_mem\n\n return env", "def _set_environment_vars(self):\n os.environ[\"PATH\"] = os.path.join(self.source_folder, \"depot_tools\") + os.pathsep + os.environ[\"PATH\"]\n os.environ[\"DEPOT_TOOLS_PATH\"] = os.path.join(self.source_folder, \"depot_tools\")\n if tools.os_info.is_windows:\n os.environ[\"DEPOT_TOOLS_WIN_TOOLCHAIN\"] = \"0\"\n os.environ[\"GYP_MSVS_VERSION\"] = \"2017\" if str(self.settings.compiler.version) == \"15\" else \"2019\"", "def create_aiida_project_environment(self):\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index()\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()", "def _setup(app_obj):", "def init_env(self, env_info):\n raise NotImplementedError", "def Setup(self):\n raise NotImplementedError(\n 'No runtime setup defined for %s' % self.__class__.__name__)", "def __setup(self):\n\n build_environment = []\n\n # The download URL has the format contains vMAJOR.MINOR in the\n # path and the tarball contains MAJOR.MINOR.REVISION, so pull\n # apart the full version to get the MAJOR and MINOR components.\n match = re.match(r'(?P<major>\\d+)\\.(?P<minor>\\d+)', self.version)\n major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],\n match.groupdict()['minor'])\n tarball = 'openmpi-{}.tar.bz2'.format(self.version)\n url = '{0}/{1}/downloads/{2}'.format(self.baseurl, major_minor,\n tarball)\n\n # CUDA\n if self.cuda:\n if self.__toolchain.CUDA_HOME:\n self.configure_opts.append(\n '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))\n else:\n self.configure_opts.append('--with-cuda')\n else:\n self.configure_opts.append('--without-cuda')\n\n # InfiniBand\n if self.infiniband:\n self.configure_opts.append('--with-verbs')\n else:\n self.configure_opts.append('--without-verbs')\n\n # UCX\n if self.__ucx:\n if isinstance(self.__ucx, string_types):\n # Use specified path\n self.configure_opts.append('--with-ucx={}'.format(self.__ucx))\n else:\n self.configure_opts.append('--with-ucx')\n\n # If UCX was built with CUDA support, it is linked with\n # libcuda.so.1, which is not available during the\n # build stage. 
Assume that if OpenMPI is built with\n # CUDA support, then UCX was as well...\n if self.cuda:\n cuda_home = \"/usr/local/cuda\"\n if self.__toolchain.CUDA_HOME:\n cuda_home = self.__toolchain.CUDA_HOME\n self.__commands.append('ln -s {0} {1}'.format(\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),\n os.path.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))\n if not self.__toolchain.LD_LIBRARY_PATH:\n build_environment.append('LD_LIBRARY_PATH=\"{}:$LD_LIBRARY_PATH\"'.format(os.path.join(cuda_home, 'lib64', 'stubs')))\n\n if self.directory:\n # Use source from local build context\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd, self.directory),\n toolchain=self.__toolchain))\n else:\n # Download source from web\n self.__commands.append(self.download_step(url=url,\n directory=self.__wd))\n self.__commands.append(self.untar_step(\n tarball=os.path.join(self.__wd, tarball), directory=self.__wd))\n self.__commands.append(self.configure_step(\n directory=os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version)),\n environment=build_environment,\n toolchain=self.__toolchain))\n\n self.__commands.append(self.build_step())\n\n if self.__check:\n self.__commands.append(self.check_step())\n\n self.__commands.append(self.install_step())\n\n # Set library path\n libpath = os.path.join(self.prefix, 'lib')\n if self.ldconfig:\n self.__commands.append(self.ldcache_step(directory=libpath))\n else:\n self.__environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)\n\n if self.directory:\n # Using source from local build context, cleanup directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, self.directory)]))\n else:\n # Using downloaded source, cleanup tarball and directory\n self.__commands.append(self.cleanup_step(\n items=[os.path.join(self.__wd, tarball),\n os.path.join(self.__wd,\n 'openmpi-{}'.format(self.version))]))", "def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,\n draw_probability=DRAW_PROBABILITY, backend=None, env=None):\n if env is None:\n env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)\n global_env.__trueskill__ = env\n return env", "def createEnvironment(self, _):\r\n if self._namespaces:\r\n raise InternalError('The environment can have only one namespace '\r\n 'at a time.')\r\n\r\n environment = Environment(self)\r\n return self._avatar.callRemote('setupNamespace', environment)", "def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env", "def ioc(globals):\n\tfrom Module.Shapes.ShapeFactory import shape_factory\n\tglobals['shape_factory'] = shape_factory\n\tfrom Module.Lighting.Colors import Colors\n\tglobals['Colors'] = Colors", "def setup(bot):\n bot.add_cog(Info(bot))", "def create_aiida_project_environment(self):\n # mock the virtualenv activation procedure\n venv_prefix = self.env_folder / self.proj_name\n current_env = os.environ.copy()\n current_env.pop('PYTHONHOME', None)\n current_env['VIRTUAL_ENV'] = venv_prefix\n old_path = current_env['PATH']\n new_path = str(venv_prefix / 'bin') + os.pathsep + old_path\n current_env['PATH'] = new_path\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index(env=current_env)\n self.install_packages_from_source(env=current_env)\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()", "def _init_nt():\n g = {}\n g['EXE'] = \".exe\"\n g['SO'] = \".pyd\"\n g['SOABI'] = g['SO'].rsplit('.')[0] # xxx?\n\n global _config_vars\n 
_config_vars = g", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n 
tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = 
''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = 
self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0", "def SetupEnvironment(self):\n self._adb.RunShellCommand('chmod 777 /data/local/tmp')\n self._adb.RunShellCommand('setenforce 0')\n for prop in self._wrap_properties:\n self._adb.RunShellCommand('setprop %s \"logwrapper %s\"' % (\n prop, self.GetTestWrapper()))\n SetChromeTimeoutScale(self._adb, self.GetTimeoutScale())", "def __init__(self):\n super().__init__()\n TemplateEngineFactory.register_factory('Jinja2Engine', Jinja2Engine.Factory)\n\n step1 = PrepareAppConfTransfiguration()\n step2 = ConfReaderToContextTransfiguration()\n step3 = AttributeChainedTransfiguration('mbean')\n\n self.add(step1)\n self.add(step2)\n self.add(step3)" ]
[ "0.5895225", "0.558601", "0.55457306", "0.5461047", "0.54333425", "0.54333425", "0.53935885", "0.53727806", "0.53727806", "0.53727806", "0.53727806", "0.53727806", "0.53727806", "0.53605133", "0.5341112", "0.5335485", "0.53309304", "0.52924156", "0.5265965", "0.5261049", "0.5258614", "0.52500117", "0.5236343", "0.520748", "0.5191519", "0.51812476", "0.51768017", "0.5161454", "0.513921", "0.51100016" ]
0.69298697
0
Compare two categorical histograms and return an overlap score based on RMSE.
b1: bin edges of hist 1
b2: bin edges of hist 2
h1: histogram values of hist 1
h2: histogram values of hist 2
Return: RMSE-based overlap score
def _compare_cat_hist(b1, b2, h1, h2):
    cbe = list(set(b1) | set(b2))
    total = len(cbe)
    rmse = 0.0
    if sum(h1) == 0 or sum(h2) == 0:
        return 0.0
    for index in range(total):
        try:
            sh1 = float(h1[b1.index(cbe[index])])
        except Exception:
            sh1 = 0.0
        try:
            sh2 = float(h2[b2.index(cbe[index])])
        except Exception:
            sh2 = 0.0
        sh1 = sh1 / sum(h1)
        sh2 = sh2 / sum(h2)
        rmse += (sh1 - sh2) ** 2
    rmse = rmse ** 0.5
    print("Cat: rmse score: {}".format(rmse))
    return rmse
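A tiny worked example for the comparison above (labels and counts are made up):

# Two categorical histograms over partially overlapping label sets.
b1, h1 = ['red', 'blue'], [3, 1]    # normalized: red 0.75, blue 0.25
b2, h2 = ['red', 'green'], [1, 1]   # normalized: red 0.50, green 0.50
# Per-category differences: red 0.25, blue 0.25, green 0.50
# -> rmse = sqrt(0.25**2 + 0.25**2 + 0.50**2) ~= 0.6124
score = _compare_cat_hist(b1, b2, h1, h2)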
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compare_cont_hist(b1, b2, h1, h2):\n\n b1 = copy.deepcopy(b1)\n h1 = copy.deepcopy(h1)\n b2 = copy.deepcopy(b2)\n h2 = copy.deepcopy(h2)\n\n bd1 = [float(x) for x in b1]\n bd2 = [float(x) for x in b2]\n\n inf = float('inf')\n\n if bd1[0] == -inf:\n del bd1[0]\n del h1[0]\n if bd1[-1] == inf:\n del bd1[-1]\n del h1[-1]\n if bd2[0] == -inf:\n del bd2[0]\n del h2[0]\n if bd2[-1] == inf:\n del bd2[-1]\n del h2[-1]\n\n cbe = sorted(list(set(bd1) | set(bd2)))\n\n total = len(cbe)\n\n curr1 = 0\n curr2 = 0\n init = False\n rmse = 0.0\n\n if sum(h1) == 0 or sum(h2) == 0:\n return 0\n\n for index in range(total):\n if init is False:\n init = True\n prev1 = 0\n prev2 = 0\n else:\n if (curr1 > prev1 and curr1 < len(bd1)):\n sh1 = float(h1[prev1] * (cbe[index] - cbe[index - 1])) / (bd1[curr1] - bd1[prev1])\n else:\n sh1 = 0.0\n if (curr2 > prev2 and curr2 < len(bd2)):\n sh2 = float(h2[prev2] * (cbe[index] - cbe[index - 1])) / (bd2[curr2] - bd2[prev2])\n else:\n sh2 = 0.0\n\n if math.isnan(sh1) is False and math.isnan(sh2) is False:\n sh1 = sh1 / sum(h1)\n sh2 = sh2 / sum(h2)\n rmse += ((sh1 - sh2) ** 2)\n\n if (curr1 < len(bd1) and bd1[curr1] <= cbe[index]):\n prev1 = curr1\n curr1 += 1\n if (curr2 < len(bd2) and bd2[curr2] <= cbe[index]):\n prev2 = curr2\n curr2 += 1\n\n rmse = (rmse) ** 0.5\n\n print(\"Cont: rmse score: {}\".format(rmse))\n return rmse", "def bins_match (a, b):\n return 0 == (\n np.sum ((a.xbins - b.xbins)**2)\n + np.sum ((a.ybins - b.ybins)**2) )", "def compareHistograms(reference,model,name):\n# comparison = TH1D('comparison'+name,'', reference.GetNbinsX(),\n# reference.GetBinLowEdge(1),reference.GetBinLowEdge(reference.GetNbinsX())+reference.GetBinWidth(1))\n comparison = reference.Clone('comparison'+name)\n\n maxY,minY=2,0\n #maxY,minY=5,-5\n content, uncertainty = {}, {} \n for bin in range(1,reference.GetNbinsX()+1):\n reference_content= reference.GetBinContent(bin)\n reference_error = reference.GetBinError(bin)**2 # squared\n model_content = 0.0\n model_error = 0.0\n if model.Class_Name()=='THStack':\n for h in model.GetHists():\n model_content+=h.GetBinContent(bin)\n model_error+=h.GetBinError(bin)**2 # squared\n else:\n model_content= model.GetBinContent(bin)\n model_error = model.GetBinError(bin)**2 # squared\n\n #### Data/MC ###\n if True:\n try: \n comparison.SetBinContent(bin,min(max(reference_content/model_content, minY),maxY))\n comparison.SetBinError(bin,(reference_content/model_content)*math.sqrt(float(reference_error)/(reference_content**2) + float(model_error)/(model_content**2)))\n except: \n comparison.SetBinContent(bin,1)\n comparison.SetBinError(bin,0)\n\n #### Chi ###\n if False:\n try: \n error = math.sqrt(model_error+reference_error)\n comparison.SetBinContent(bin,min(max((reference_content - model_content)/error, minY),maxY))\n comparison.SetBinError(bin, 1 )\n except: \n comparison.SetBinContent(bin,0)\n comparison.SetBinError(bin,1)\n\n #comparison.SetAxisRange(minY,maxY,'Y')\n comparison.SetAxisRange(0.5,1.5,'Y')\n return comparison", "def similarity_score(self, img1, img2):\n\t\t# resize into the same shape first\n\t\tif img1.shape != img2.shape:\n\t\t\tv, h = max(img1.shape[0], img2.shape[0]), max(img1.shape[1], img2.shape[1])\n\t\t\tdim = (h, v)\n\t\t\th_scale = min(img1.shape[1], img2.shape[1]) / h\n\t\t\tv_scale = min(img1.shape[0], img2.shape[0]) / v\n\t\t\timg1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n\t\t\timg2 = cv2.resize(img2, dim, interpolation = cv2.INTER_AREA)\n\t\t# # histogram\n\t\t# diff = 0\n\t\t# for c in 
range(3):\n\t\t# \thist1 = cv2.calcHist([img1], [c], None, [256], [0, 256])\n\t\t# \thist2 = cv2.calcHist([img2], [c], None, [256], [0, 256])\n\t\t# \tdiff += np.linalg.norm(hist1 - hist2)\n\n\t\t# HoG\n\t\tfd1, _ = hog(img1, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\tfd2, _ = hog(img2, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\t# Combine both\n\t\tdist = np.linalg.norm(fd1 - fd2)\n\t\taim = mean_pixel_intensity_diff(img1, img2)\n\t\tscore = 1 / (dist + aim + 1)\n\t\treturn score", "def get_identical_score(bin1,bin2=None):\n if bin2==None: bin2=[]\n tmpscore=0.0\n norm=0\n for ali1 in bin1:\n tmpscore+=get_subscore(ali1,ali1)\n norm+=1\n for ali2 in bin2:\n tmpscore+=get_subscore(ali2,ali2)\n norm+=1\n return tmpscore/norm", "def similarity_two_images_hog(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hog_image1 = hog_of_image(img1)\n hog_image2 = hog_of_image(img2)\n\n max_difference = max(2 * sum_all_magnitudes(img1), 2 * sum_all_magnitudes(img2))\n return 100 - 100 * np.sum(np.absolute(hog_image1 - hog_image2)) / max_difference", "def similarity_two_images_color(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hist_image_1 = histogram_of_image_color(img1, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n hist_image_2 = histogram_of_image_color(img2, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n max_difference = max(2 * np.sum(hist_image_1), 2 * np.sum(hist_image_2))\n return 100 - 100 * np.sum(np.absolute(hist_image_1 - hist_image_2)) / max_difference", "def match(desc1,desc2,threshold=0.5):\n n = len(desc1[0])\n # pair-wise distances\n d = -np.ones((len(desc1),len(desc2)))\n for i in range(len(desc1)):\n for j in range(len(desc2)):\n d1 = (desc1[i] - np.mean(desc1[i])) / np.std(desc1[i])\n d2 = (desc2[j] - np.mean(desc2[j])) / np.std(desc2[j])\n ncc_value = sum(d1 * d2) / (n-1)\n if ncc_value > threshold:\n d[i,j] = ncc_value\n ndx = np.argsort(-d)\n matchscores = ndx[:,0]\n return matchscores", "def hist_sim(hist1, hist2):\n # print(np.dot(hist1,hist2.T)/(np.linalg.norm(hist1)*np.linalg.norm(hist2)))\n sim = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)\n # print('sim')\n return sim", "def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score", "def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))", "def computeCriteria(seg1,seg2,mergedSegments,weights):\n criteronScores = [\n profileSim(seg1,[seg2],updatedSpeed),\n directtion(seg1,[seg2],mergedSegments),\n shareNoEdges(seg1,[seg2],mergedSegments)\n ]\n return sum(criteronScores*weights)", "def rmse_calc(arr1, arr2):\n assert arr1.shape==arr2.shape\n \n return np.sqrt(np.mean((arr2-arr1)**2))", "def match(desc1,desc2):\n\t\n\tdesc1 = array([d/linalg.norm(d) for d in desc1])\n\tdesc2 = array([d/linalg.norm(d) for d in desc2])\n\t\n\tdist_ratio = 0.6\n\tdesc1_size = desc1.shape\n\t\n\tmatchscores = zeros((desc1_size[0],1))\n\tdesc2t = 
desc2.T #precompute matrix transpose\n\tfor i in range(desc1_size[0]):\n\t\tdotprods = dot(desc1[i,:],desc2t) #vector of dot products\n\t\tdotprods = 0.9999*dotprods\n\t\t#inverse cosine and sort, return index for features in second image\n\t\tindx = argsort(arccos(dotprods))\n\t\t\n\t\t#check if nearest neighbor has angle less than dist_ratio times 2nd\n#\t\tif arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n\t\tmatchscores[i] = int(indx[0])\n\t\n\treturn matchscores", "def plot_histogram(site, data1, data2, label1='Data1', label2='Data2', subset_label=None, variable=None):\n # print some parameters of data\n print('Ref data: {}'.format(len(data1)))\n print('New data: {}'.format(len(data2)))\n\n # get histogram parameters\n range_min = np.nanmin(np.hstack((data1, data2)))-np.nanmin(np.hstack((data1, data2))) % 10\n range_max = np.nanmax(np.hstack((data1, data2))) + (10 - np.nanmax(np.hstack((data1, data2))) % 10)\n bins = int(range_max - range_min)\n\n # compute histograms\n hist1, bin_edges = np.histogram(data1, bins=bins, range=(range_min, range_max), density=True)\n hist2, bin_edges = np.histogram(data2, bins=bins, range=(range_min, range_max), density=True)\n\n # gev fitting--use function to try a couple times to get a good fit\n shape1, loc1, scale1 = get_gev_fit(data1)\n shape2, loc2, scale2 = get_gev_fit(data2)\n\n x_gev = np.linspace(range_min, range_max, bins*10+1)\n y1_gev = gev.pdf(x_gev, shape1, loc1, scale1)\n y2_gev = gev.pdf(x_gev, shape2, loc2, scale2)\n\n # compute POD and FAR of 2.5-sigma event (from reference climate)\n mean1 = gev.mean(shape1, loc=loc1, scale=scale1)\n mean2 = gev.mean(shape2, loc=loc2, scale=scale2)\n std1 = np.sqrt(gev.var(shape1, loc=loc1,scale=scale1))\n std2 = np.sqrt(gev.var(shape2, loc=loc2,scale=scale2))\n # calculate a, b, and c params from Durran 2019\n sig20_thres = np.where((x_gev > mean1 + 2.0 * std1))\n sig25_thres = np.where((x_gev > mean1 + 2.5 * std1))\n sig35_thres = np.where((x_gev > mean1 + 3.5 * std1))\n c_val = np.sum(y1_gev[sig25_thres])\n a_val = np.sum(y2_gev[sig25_thres]) - c_val\n b_val = np.sum(y2_gev[sig20_thres]) - np.sum(y1_gev[sig20_thres]) - a_val\n pod = a_val/(a_val+b_val)\n far = c_val/(a_val+c_val)\n print('POD = {} FAR = {}'.format(pod, far))\n\n\n fig = plt.figure()\n fig.set_size_inches(6, 4)\n\n # stats of gev fit\n #mean1, var1, skew1, kurt1 = gev.stats(shape1, moments='mvsk')\n\n mu1 = np.mean(data1)\n sigma1 = np.std(data1)\n mu2 = np.mean(data2)\n sigma2 = np.std(data2)\n\n\n plt.bar(bin_edges[:-1], hist1, width=1, align='edge', color='blue', alpha=0.5, label=label1)\n plt.bar(bin_edges[:-1], hist2, width=1, align='edge', color='red', alpha=0.5, label=label2)\n plt.plot(x_gev, y1_gev, color='blue')\n plt.plot(x_gev, y2_gev, color='red')\n plt.plot([x_gev[sig20_thres[0][0]], x_gev[sig20_thres[0][0]]], [0,y2_gev[sig20_thres[0][0]]], color='k', lw=1.0)\n plt.plot([x_gev[sig25_thres[0][0]], x_gev[sig25_thres[0][0]]], [0, y2_gev[sig25_thres[0][0]]], color='k', lw=1.0)\n #plt.plot([x_gev[sig35_thres[0][0]], x_gev[sig35_thres[0][0]]], [0, y2_gev[sig35_thres[0][0]]], color='k', lw=1.0)\n plt.plot([mu1, mu1], [0, 1], color='blue', linestyle=':')\n plt.plot([mu2, mu2], [0, 1], color='red', linestyle=':')\n\n plt.ylabel('PDF')\n plt.xlabel('Temperature')\n plt.ylim(0, np.max((np.max(hist1),np.max(hist2),np.max(y1_gev),np.max(y2_gev)))+0.02)\n\n plt.legend()\n plt.title('{} {}'.format(site, subset_label))\n\n plt.savefig('{}{}_{}{}.png'.format(config['PLOT_DIR'], site, subset_label, variable), 
bbox_inches='tight', dpi=200)\n print('Plotted histogram for {}'.format(site))\n\n return", "def calculate_histogram(self, abstract_features_1, abstract_features_2):\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist/torch.sum(hist)\n hist = hist.view(1, -1)\n return hist", "def get_overlap_metrics():\n return [DiceCoefficient(),\n JaccardCoefficient(),\n AreaUnderCurve(),\n CohenKappaMetric(),\n RandIndex(),\n AdjustedRandIndex(),\n InterclassCorrelation(),\n VolumeSimilarity(),\n MutualInformation()]", "def bins_match (a, b):\n return np.sum ((a.bins - b.bins)**2) == 0", "def compare_histograms(categorical_var, numerical_var):\n pass", "def match(desc1,desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n dist_ratio = 0.6\n desc1_size = desc1.shape\n matchscores = zeros((desc1_size[0],1),'int')\n desc2t = desc2.T # precompute matrix transpose\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t) # vector of dot products\n dotprods *= 0.9999\n # inverse cosine and sort, return index for features in second image\n indx = argsort(arccos(dotprods))\n # check if nearest neighbor has angle less than dist_ratio times 2nd\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n return matchscores", "def compare_mi_histograms(outfile, infile1, infile2, maxv=None):\n data1 = file_columns_to_list(infile2, 0, 1)\n data2 = file_columns_to_list(infile1, 0, 1)\n # print \"{} values read from {}\\n{} values read from {}\".format(len(data1), infile1, len(data2), infile2)\n # print data1\n # print data2\n tot1 = 0\n tot2 = 0\n maxdiff = [0, 1, 0]\n if maxv:\n data1 = add_missing(data1, maxv)\n data2 = add_missing(data2, maxv)\n else:\n data1 = conv_and_reverse(data1)\n data2 = conv_and_reverse(data2)\n\n with open(outfile, \"w\") as out:\n out.write(\"#Idx\\tRandom\\tReal\\tDiff\\tFPR\\t% Diff\\n\")\n for i in range(len(data1)):\n x1 = data1[i][1]\n x2 = data2[i][1]\n tot1 += x1\n tot2 += x2\n diff = tot2-tot1\n # print \"{}-{} = {} ({})\".format(tot1, tot2, diff, maxdiff)\n if tot2 == 0:\n fpr = 0\n else:\n fpr = 1.0 * tot1 / tot2\n if tot1 == 0:\n pdiff = 0\n else:\n pdiff = 1.0 * diff / tot1\n out.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(data1[i][0], tot1, tot2, diff, fpr, pdiff))\n # raw_input()\n if diff > maxdiff[0]:\n maxdiff[0] = diff\n maxdiff[1] = data1[i][0]\n maxdiff[2] = fpr\n return maxdiff", "def compute_cluster_similarities(emb_clusters1, emb_clusters2, compare, order, clmethod, plot):\n def compute_sim(e, e1, cls, cls1):\n sims = np.empty((20, 20))\n xticks, yticks = [], []\n for i, c in enumerate(cls):\n yticks.append(', '.join(c[1]) + (f' {round(c[3], 5)}' if order == 'avgfreq' else ''))\n for j, c1 in enumerate(cls1):\n if len(xticks) < 20:\n xticks.append(', '.join(c1[1]) + (f' {round(c1[3], 5)}' if order == 'avgfreq' else ''))\n sims[i, j] = jaccard_similarity_score(c[2], c1[2])\n jaccard_similarities[f'{e}-{e1}'] = sims\n\n if plot:\n if order == 'clustermap':\n similarity_clustermap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}')\n elif order == 'default' or order == 'avgfreq':\n similarity_heatmap(sims, xticks, yticks, f'{e}-{e1}_{clmethod}', order)\n else:\n pass\n\n jaccard_similarities = {}\n if compare == 'cross':\n for ie, (e, cls) in enumerate(emb_clusters1.items()):\n for ie1, (e1, cls1) in enumerate(emb_clusters2.items()):\n if ie < 
ie1:\n compute_sim(e, e1, cls, cls1)\n elif compare == 'dot':\n for (e, cls), (e1, cls1) in zip(emb_clusters1.items(), emb_clusters2.items()):\n compute_sim(e, e1, cls, cls1)\n\n return jaccard_similarities", "def compare(hists, args):\n if args.total:\n total_h = deepcopy(hists[0])\n if args.line:\n total_h.values = np.sum( h.values for h in hists)\n total_h.values = total_h.values + (np.ones_like(total_h.values)* args.line[0])\n else:\n total_h.values = np.sum((h.values for h in hists))\n total_h.yerr = np.sqrt(np.sum( h.yerr*h.yerr for h in hists))\n total_h.label = 'Total'\n hists.insert(0,total_h)\n if args.totalsq:\n total_h = deepcopy(hists[0])\n if args.line:\n total_h.values = np.sum( h.values*h.values for h in hists)\n total_h.values = total_h.values + (np.ones_like(total_h.values)* (args.line[0]*args.line[0]))\n total_h.values = np.sqrt(total_h.values)\n else:\n total_h.values = np.sqrt(np.sum( h.values*h.values for h in hists))\n total_h.yerr = np.zeros_like(total_h.yerr)\n total_h.label = 'Total'\n hists.insert(0,total_h)\n \n neutral = style.get_colors(\"neutral\");\n if not args.colors:\n if args.colorscheme:\n neutral = style.get_colors(args.colorscheme,len(hists));\n bold = style.get_colors(args.colorscheme,len(hists));\n light = style.get_colors(args.colorscheme,len(hists));\n else:\n neutral = style.get_colors(\"neutral\");\n bold = style.get_colors(\"bold\");\n light = style.get_colors(\"light\");\n for i,hist in enumerate(hists):\n if \"bold\" == hist.emph:\n hist.color = bold[i%len(bold)]\n elif \"light\" == hist.emph:\n hist.color = light[i%len(bold)]\n else:\n hist.color = neutral[i%len(bold)]\n\n fig = plt.figure()\n if args.ratio:\n gs = gridspec.GridSpec(2,1,height_ratios=[3,1])\n ax = plt.subplot(gs[0])\n ax_ratio = plt.subplot(gs[1], sharex=ax)\n rhists = [hist.divide(hists[-1]) for hist in hists[:-1]]\n for rhist in rhists:\n rhist.plot_lines(ax_ratio)\n plt.subplots_adjust(hspace=0)\n plt.setp(ax.get_xticklabels(), visible=False)\n setup_ratio(args, ax, ax_ratio)\n else:\n ax = fig.add_subplot(111)\n\n if args.alpha:\n for hist in hists:\n hist.options['alpha'] = args.alpha\n\n if args.fit:\n for hist in hists:\n v = fits[args.fit[0]][1](hist.lefts + hist.widths/2.0, hist.values)\n params = tuple([v[i] for i in xrange(args.fit[1].count('%'))])\n hist.label += (args.fit[1] % params)\n x = np.linspace(hist.lefts[0],hist.lefts[-1]+hist.widths[-1],200)\n ax.plot(x,fits[args.fit[0]][0](v,x), color = hist.color)\n\n if args.noerror:\n for hist in hists:\n hist.plot_noerror(ax)\n elif args.points:\n for hist in hists:\n hist.plot_points(ax)\n else:\n for hist in hists:\n hist.plot_lines(ax)\n\n\n fig.subplots_adjust(bottom=.12, left=.14)\n plot_lines(args, ax, neutral, len(hists), hists)\n setup_figure(args, ax)\n ax.legend(frameon=False,loc=args.loc)\n \n if \".\" not in args.name:\n fig.savefig(args.outDir+args.name+\".pdf\", transparent=args.transparent)\n print \"Saving figure: %s.pdf\" % args.outDir+args.name\n else:\n fig.savefig(args.outDir+args.name, transparent=args.transparent)\n print \"Saving figure: %s\" % args.outDir+args.name\n\n plt.close(fig)\n return", "def height_similarity(h1, h2, condition):\n if h1 and h2 and len(condition) > 0:\n if h2 < condition[0] or h2 > condition[1]:\n return 0.5\n gap = abs(h1 - h2)\n if 0 <= gap and gap < 10:\n return 0.8\n elif 10 <= gap and gap < 20:\n return 0.9\n else:\n return 1.0\n else:\n return 0.0", "def compare(cls, data_hist, ref_hist, tolerance):\n dmean = abs(data_hist.GetMean() - ref_hist.GetMean())\n 
dwidth = abs(data_hist.GetRMS() - ref_hist.GetRMS())\n score = 70.0 * (dmean < abs(0.3*ref_hist.GetRMS()))\n score += 30.0 * (dwidth < abs(tolerance*ref_hist.GetRMS()))\n if score > 70.0: # both passes: 100\n level = ERROR_LEVELS.OK\n elif score >= 30.0: # only one passes: 70 or 30\n level = ERROR_LEVELS.WARNING\n else: # both fails: 0\n level = ERROR_LEVELS.ERROR\n debug('score: {}, level: {}'.format(score, level))\n return cls.create_final_dict(Score(score), level)", "def _histogram_intersection_distance(a, b):\n # branching version\n #return np.vstack((a, b)).min(axis=0).sum()\n\n # Non-branching version\n # noinspection PyUnresolvedReferences\n return (a + b - np.abs(a - b)).sum() * 0.5", "def mean_relationship(x, y, bins_values):\r\n sort_ind_x = np.argsort(x)\r\n x = x[sort_ind_x]\r\n y = y[sort_ind_x]\r\n hist, bin_edges = np.histogram(x, bins=bins_values)\r\n array_end = np.cumsum(hist)\r\n array_start = np.cumsum(hist) - hist\r\n y_x = np.zeros(len(array_start))\r\n y_x_std = np.zeros(len(array_start))\r\n for i in np.arange(len(array_start)):\r\n y_x[i] = np.mean(y[array_start[i]:array_end[i]])\r\n y_x_std[i] = np.std(y[array_start[i]:array_end[i]])\r\n return y_x, y_x_std", "def plot_compare_train_test(decisions,bins,classifier, ws=None):\n low = min(np.min(d) for d in decisions)\n high = max(np.max(d) for d in decisions)\n low_high = (low,high)\n # Plot with python.\n plt.figure()\n plt.hist(decisions[0], color='b', alpha=0.5, range=low_high, bins=bins, histtype='stepfilled', density=True, label='S (train)', weights=ws[0])\n plt.hist(decisions[1], color='r', alpha=0.5, range=low_high, bins=bins, histtype='stepfilled', density=True, label='B (train)', weights=ws[1])\n hist, bins = np.histogram(decisions[2], bins=bins, range=low_high, density=True, weights=ws[2])\n center = (bins[:-1] + bins[1:]) / 2\n #scale = len(decisions[2]) / sum(hist)\n scale = sum(ws[2]) / sum(hist)\n err = np.sqrt(hist * scale) / scale\n plt.errorbar(center, hist, yerr=err, fmt='o', c='b', label='S (test)')\n hist, bins = np.histogram(decisions[3], bins=bins, range=low_high, density=True, weights=ws[3])\n #scale = len(decisions[3]) / sum(hist)\n scale = sum(ws[3]) / sum(hist)\n err = np.sqrt(hist * scale) / scale\n plt.errorbar(center, hist, yerr=err, fmt='o', c='r', label='B (test)')\n plt.xticks(np.arange(0, 1, step=0.1))\n plt.xlabel(\"Classifier output\")\n plt.ylabel(\"Arbitrary units\")\n plt.legend(loc='best')\n plt.savefig('plots/plt_' + classifier+'_Output.pdf',format='pdf')\n plt.show(block = False)\n return None", "def get_random_score(bin1,bin2,nsample,bootstrap=1.0):\n totscore=0.0\n isdiagonal=False\n if bin1==bin2:\n isdiagonal=True\n bin1=random.sample(bin1,int(len(bin1)*bootstrap))\n bin2=random.sample(bin2,int(len(bin2)*bootstrap))\n for n in range(0,nsample):\n rbin1=[''.join(random.sample(ali1,len(ali1))) for ali1 in bin1]\n if isdiagonal:\n rbin2=rbin1 #if the two bins are identical, the randomization should also be\n else:\n rbin2=[''.join(random.sample(ali2,len(ali2))) for ali2 in bin2]\n if mixvec:\n vec1=_seqs2vec(rbin1)\n vec2=_seqs2vec(rbin2)\n score=get_subscore_mixvec(vec1,vec2)\n else:\n score=get_subscore_pairwise(rbin1,rbin2)\n totscore+=score\n return totscore/nsample", "def get_ODER(A,B):\n\n # mean area of raters\n MTA = (np.sum(A) + np.sum(B))/2.\n\n # intersection of outlines\n intersect = np.multiply(A,B)\n\n # regions in A\n labels_A = skm.label(A)\n\n # regions in B\n labels_B = skm.label(B)\n\n # labels in found in A but also in B\n labels_in_A_and_B = 
np.unique(np.multiply(intersect, labels_A))\n labels_in_B_and_A = np.unique(np.multiply(intersect, labels_B))\n\n # labels unique in A and unique in B\n labels_only_in_A = np.asarray([ii for ii in np.unique(labels_A) if ii not in labels_in_A_and_B])\n labels_only_in_B = np.asarray([ii for ii in np.unique(labels_B) if ii not in labels_in_B_and_A])\n\n # make sure 0 is not picked up\n labels_in_A_and_B = labels_in_A_and_B[labels_in_A_and_B>0]\n labels_in_B_and_A = labels_in_B_and_A[labels_in_B_and_A>0]\n labels_only_in_A = labels_only_in_A[labels_only_in_A>0]\n labels_only_in_B = labels_only_in_B[labels_only_in_B>0]\n\n # calculate detection error\n # sum of areas only picked up by A plus sum of areas only picked up by B\n DE = np.sum([np.sum(labels_A==ii) for ii in labels_only_in_A]) + np.sum([np.sum(labels_B==ii) for ii in labels_only_in_B])\n\n # calculate outline error\n # total difference between union and intersection of the region that was outlines by both\n # = area determined by rater 1 + area determined by rater b - 2 * area determined by both\n # as union is area determined by rater 1 + area determined by rater b - area determined by both\n OE = np.sum([np.sum(labels_A==ii) for ii in labels_in_A_and_B]) + np.sum([np.sum(labels_B==ii) for ii in labels_in_B_and_A]) - 2*np.sum(intersect)\n\n # convert to rates and return\n return OE/MTA, DE/MTA" ]
[ "0.6879068", "0.61870795", "0.6087511", "0.60521746", "0.60158235", "0.59968966", "0.59831977", "0.5939749", "0.5920067", "0.5869332", "0.57640076", "0.5714345", "0.568633", "0.56607807", "0.56391025", "0.5636864", "0.56339514", "0.56069434", "0.56041086", "0.5590879", "0.55260634", "0.55094206", "0.5499281", "0.5485772", "0.5479778", "0.5450621", "0.54435796", "0.54423803", "0.54401356", "0.54384065" ]
0.75110346
0
Compare two continuous histograms and return an overlap score based on RMSE. b1: bin edges of hist 1; b2: bin edges of hist 2; h1: histogram values of hist 1; h2: histogram values of hist 2. Returns an RMSE-based overlap score.
import copy
import math

def _compare_cont_hist(b1, b2, h1, h2):
    """Compare two continuous histograms and return an RMSE-based overlap score.

    b1/b2 are the bin edges and h1/h2 the bin values of the two histograms.
    Lower scores indicate greater overlap.
    """
    # Work on copies so the caller's bins and values are left untouched.
    b1 = copy.deepcopy(b1)
    h1 = copy.deepcopy(h1)
    b2 = copy.deepcopy(b2)
    h2 = copy.deepcopy(h2)

    bd1 = [float(x) for x in b1]
    bd2 = [float(x) for x in b2]

    inf = float('inf')

    # Drop unbounded edge bins; they cannot be mapped onto a finite grid.
    if bd1[0] == -inf:
        del bd1[0]
        del h1[0]
    if bd1[-1] == inf:
        del bd1[-1]
        del h1[-1]
    if bd2[0] == -inf:
        del bd2[0]
        del h2[0]
    if bd2[-1] == inf:
        del bd2[-1]
        del h2[-1]

    # Combined bin edges: the union of both histograms' edges.
    cbe = sorted(set(bd1) | set(bd2))

    total = len(cbe)

    curr1 = 0
    curr2 = 0
    init = False
    rmse = 0.0

    if sum(h1) == 0 or sum(h2) == 0:
        return 0.0

    for index in range(total):
        if init is False:
            init = True
            prev1 = 0
            prev2 = 0
        else:
            # Redistribute each histogram's mass onto the combined grid,
            # proportionally to the fraction of the original bin that the
            # current combined bin covers.
            if curr1 > prev1 and curr1 < len(bd1):
                sh1 = float(h1[prev1] * (cbe[index] - cbe[index - 1])) / (bd1[curr1] - bd1[prev1])
            else:
                sh1 = 0.0
            if curr2 > prev2 and curr2 < len(bd2):
                sh2 = float(h2[prev2] * (cbe[index] - cbe[index - 1])) / (bd2[curr2] - bd2[prev2])
            else:
                sh2 = 0.0

            if math.isnan(sh1) is False and math.isnan(sh2) is False:
                # Normalize to frequencies and accumulate the squared difference.
                sh1 = sh1 / sum(h1)
                sh2 = sh2 / sum(h2)
                rmse += (sh1 - sh2) ** 2

        # Advance each histogram's cursor past any edge consumed at this step.
        if curr1 < len(bd1) and bd1[curr1] <= cbe[index]:
            prev1 = curr1
            curr1 += 1
        if curr2 < len(bd2) and bd2[curr2] <= cbe[index]:
            prev2 = curr2
            curr2 += 1

    rmse = rmse ** 0.5

    print("Cont: rmse score: {}".format(rmse))
    return rmse
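A minimal usage sketch for the function above; the NumPy calls and sample data below are illustrative assumptions, not part of the original source:

import numpy as np

# Two samples from slightly different normal distributions (illustrative only).
h1, b1 = np.histogram(np.random.normal(0.0, 1.0, 1000), bins=10)
h2, b2 = np.histogram(np.random.normal(0.5, 1.0, 1000), bins=10)

# Lists are passed because the function may delete edge bins in place.
score = _compare_cont_hist(list(b1), list(b2), list(h1), list(h2))
# Lower scores mean the two histograms' mass overlaps more closely.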
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compare_cat_hist(b1, b2, h1, h2):\n cbe = list(set(b1) | set(b2))\n\n total = len(cbe)\n rmse = 0.0\n\n if sum(h1) == 0 or sum(h2) == 0:\n return 0.0\n\n for index in range(total):\n sh1 = 0.0\n sh2 = 0.0\n try:\n sh1 = float(h1[b1.index(cbe[index])])\n except Exception as e:\n sh1 = 0.0\n try:\n sh2 = float(h2[b2.index(cbe[index])])\n except Exception as e:\n sh2 = 0.0\n\n sh1 = sh1 / sum(h1)\n sh2 = sh2 / sum(h2)\n rmse += ((sh1 - sh2) ** 2)\n\n rmse = (rmse) ** 0.5\n print(\"Cat: rmse score: {}\".format(rmse))\n return rmse", "def bins_match (a, b):\n return 0 == (\n np.sum ((a.xbins - b.xbins)**2)\n + np.sum ((a.ybins - b.ybins)**2) )", "def similarity_two_images_hog(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hog_image1 = hog_of_image(img1)\n hog_image2 = hog_of_image(img2)\n\n max_difference = max(2 * sum_all_magnitudes(img1), 2 * sum_all_magnitudes(img2))\n return 100 - 100 * np.sum(np.absolute(hog_image1 - hog_image2)) / max_difference", "def similarity_score(self, img1, img2):\n\t\t# resize into the same shape first\n\t\tif img1.shape != img2.shape:\n\t\t\tv, h = max(img1.shape[0], img2.shape[0]), max(img1.shape[1], img2.shape[1])\n\t\t\tdim = (h, v)\n\t\t\th_scale = min(img1.shape[1], img2.shape[1]) / h\n\t\t\tv_scale = min(img1.shape[0], img2.shape[0]) / v\n\t\t\timg1 = cv2.resize(img1, dim, interpolation = cv2.INTER_AREA)\n\t\t\timg2 = cv2.resize(img2, dim, interpolation = cv2.INTER_AREA)\n\t\t# # histogram\n\t\t# diff = 0\n\t\t# for c in range(3):\n\t\t# \thist1 = cv2.calcHist([img1], [c], None, [256], [0, 256])\n\t\t# \thist2 = cv2.calcHist([img2], [c], None, [256], [0, 256])\n\t\t# \tdiff += np.linalg.norm(hist1 - hist2)\n\n\t\t# HoG\n\t\tfd1, _ = hog(img1, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\tfd2, _ = hog(img2, orientations=8, pixels_per_cell=(16, 16),\n cells_per_block=(1, 1), visualize=True, multichannel=True)\n\t\t# Combine both\n\t\tdist = np.linalg.norm(fd1 - fd2)\n\t\taim = mean_pixel_intensity_diff(img1, img2)\n\t\tscore = 1 / (dist + aim + 1)\n\t\treturn score", "def hist_sim(hist1, hist2):\n # print(np.dot(hist1,hist2.T)/(np.linalg.norm(hist1)*np.linalg.norm(hist2)))\n sim = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)\n # print('sim')\n return sim", "def similarity_two_images_color(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hist_image_1 = histogram_of_image_color(img1, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n hist_image_2 = histogram_of_image_color(img2, HIST_BINS_INTENSITY, BIN_DIFFERENCE_INTENSITY_HALF)\n max_difference = max(2 * np.sum(hist_image_1), 2 * np.sum(hist_image_2))\n return 100 - 100 * np.sum(np.absolute(hist_image_1 - hist_image_2)) / max_difference", "def get_identical_score(bin1,bin2=None):\n if bin2==None: bin2=[]\n tmpscore=0.0\n norm=0\n for ali1 in bin1:\n tmpscore+=get_subscore(ali1,ali1)\n norm+=1\n for ali2 in bin2:\n tmpscore+=get_subscore(ali2,ali2)\n norm+=1\n return tmpscore/norm", "def rmse_calc(arr1, arr2):\n assert arr1.shape==arr2.shape\n \n return np.sqrt(np.mean((arr2-arr1)**2))", "def match(desc1,desc2,threshold=0.5):\n n = len(desc1[0])\n # pair-wise distances\n d = -np.ones((len(desc1),len(desc2)))\n for i in range(len(desc1)):\n for j in range(len(desc2)):\n d1 = (desc1[i] - np.mean(desc1[i])) / np.std(desc1[i])\n d2 = (desc2[j] - np.mean(desc2[j])) / np.std(desc2[j])\n ncc_value = sum(d1 * d2) / (n-1)\n if ncc_value > threshold:\n d[i,j] = ncc_value\n ndx = np.argsort(-d)\n matchscores = ndx[:,0]\n return 
matchscores", "def compareHistograms(reference,model,name):\n# comparison = TH1D('comparison'+name,'', reference.GetNbinsX(),\n# reference.GetBinLowEdge(1),reference.GetBinLowEdge(reference.GetNbinsX())+reference.GetBinWidth(1))\n comparison = reference.Clone('comparison'+name)\n\n maxY,minY=2,0\n #maxY,minY=5,-5\n content, uncertainty = {}, {} \n for bin in range(1,reference.GetNbinsX()+1):\n reference_content= reference.GetBinContent(bin)\n reference_error = reference.GetBinError(bin)**2 # squared\n model_content = 0.0\n model_error = 0.0\n if model.Class_Name()=='THStack':\n for h in model.GetHists():\n model_content+=h.GetBinContent(bin)\n model_error+=h.GetBinError(bin)**2 # squared\n else:\n model_content= model.GetBinContent(bin)\n model_error = model.GetBinError(bin)**2 # squared\n\n #### Data/MC ###\n if True:\n try: \n comparison.SetBinContent(bin,min(max(reference_content/model_content, minY),maxY))\n comparison.SetBinError(bin,(reference_content/model_content)*math.sqrt(float(reference_error)/(reference_content**2) + float(model_error)/(model_content**2)))\n except: \n comparison.SetBinContent(bin,1)\n comparison.SetBinError(bin,0)\n\n #### Chi ###\n if False:\n try: \n error = math.sqrt(model_error+reference_error)\n comparison.SetBinContent(bin,min(max((reference_content - model_content)/error, minY),maxY))\n comparison.SetBinError(bin, 1 )\n except: \n comparison.SetBinContent(bin,0)\n comparison.SetBinError(bin,1)\n\n #comparison.SetAxisRange(minY,maxY,'Y')\n comparison.SetAxisRange(0.5,1.5,'Y')\n return comparison", "def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score", "def match(desc1,desc2):\n\t\n\tdesc1 = array([d/linalg.norm(d) for d in desc1])\n\tdesc2 = array([d/linalg.norm(d) for d in desc2])\n\t\n\tdist_ratio = 0.6\n\tdesc1_size = desc1.shape\n\t\n\tmatchscores = zeros((desc1_size[0],1))\n\tdesc2t = desc2.T #precompute matrix transpose\n\tfor i in range(desc1_size[0]):\n\t\tdotprods = dot(desc1[i,:],desc2t) #vector of dot products\n\t\tdotprods = 0.9999*dotprods\n\t\t#inverse cosine and sort, return index for features in second image\n\t\tindx = argsort(arccos(dotprods))\n\t\t\n\t\t#check if nearest neighbor has angle less than dist_ratio times 2nd\n#\t\tif arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n\t\tmatchscores[i] = int(indx[0])\n\t\n\treturn matchscores", "def bins_match (a, b):\n return np.sum ((a.bins - b.bins)**2) == 0", "def match(desc1,desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n dist_ratio = 0.6\n desc1_size = desc1.shape\n matchscores = zeros((desc1_size[0],1),'int')\n desc2t = desc2.T # precompute matrix transpose\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t) # vector of dot products\n dotprods *= 0.9999\n # inverse cosine and sort, return index for features in second image\n indx = argsort(arccos(dotprods))\n # check if nearest neighbor has angle less than dist_ratio times 2nd\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n 
matchscores[i] = int(indx[0])\n return matchscores", "def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))", "def KolmogorovSmirnoff_statistics(dd1, dd2):\n cum1 = dd1.cumulative_distribution()\n cum2 = dd2.cumulative_distribution()\n minimum = max(cum1[0][0], cum2[0][0])\n maximum = max(cum1[-1][0], cum2[-1][0])\n index1 = len(cum1) - 1\n index2 = len(cum2) - 1\n summa1 = summa2 = 0\n\n difference = 0\n for i in reversed(range(minimum, maximum+1)):\n if cum1[index1][0] == i:\n summa1 = cum1[index1][1]\n index1 -= 1\n if cum2[index2][0] == i:\n summa2 = cum2[index2][1]\n index2 -= 1\n if abs(summa1 - summa2) > difference:\n difference = abs(summa1 - summa2)\n return difference", "def plot_histogram(site, data1, data2, label1='Data1', label2='Data2', subset_label=None, variable=None):\n # print some parameters of data\n print('Ref data: {}'.format(len(data1)))\n print('New data: {}'.format(len(data2)))\n\n # get histogram parameters\n range_min = np.nanmin(np.hstack((data1, data2)))-np.nanmin(np.hstack((data1, data2))) % 10\n range_max = np.nanmax(np.hstack((data1, data2))) + (10 - np.nanmax(np.hstack((data1, data2))) % 10)\n bins = int(range_max - range_min)\n\n # compute histograms\n hist1, bin_edges = np.histogram(data1, bins=bins, range=(range_min, range_max), density=True)\n hist2, bin_edges = np.histogram(data2, bins=bins, range=(range_min, range_max), density=True)\n\n # gev fitting--use function to try a couple times to get a good fit\n shape1, loc1, scale1 = get_gev_fit(data1)\n shape2, loc2, scale2 = get_gev_fit(data2)\n\n x_gev = np.linspace(range_min, range_max, bins*10+1)\n y1_gev = gev.pdf(x_gev, shape1, loc1, scale1)\n y2_gev = gev.pdf(x_gev, shape2, loc2, scale2)\n\n # compute POD and FAR of 2.5-sigma event (from reference climate)\n mean1 = gev.mean(shape1, loc=loc1, scale=scale1)\n mean2 = gev.mean(shape2, loc=loc2, scale=scale2)\n std1 = np.sqrt(gev.var(shape1, loc=loc1,scale=scale1))\n std2 = np.sqrt(gev.var(shape2, loc=loc2,scale=scale2))\n # calculate a, b, and c params from Durran 2019\n sig20_thres = np.where((x_gev > mean1 + 2.0 * std1))\n sig25_thres = np.where((x_gev > mean1 + 2.5 * std1))\n sig35_thres = np.where((x_gev > mean1 + 3.5 * std1))\n c_val = np.sum(y1_gev[sig25_thres])\n a_val = np.sum(y2_gev[sig25_thres]) - c_val\n b_val = np.sum(y2_gev[sig20_thres]) - np.sum(y1_gev[sig20_thres]) - a_val\n pod = a_val/(a_val+b_val)\n far = c_val/(a_val+c_val)\n print('POD = {} FAR = {}'.format(pod, far))\n\n\n fig = plt.figure()\n fig.set_size_inches(6, 4)\n\n # stats of gev fit\n #mean1, var1, skew1, kurt1 = gev.stats(shape1, moments='mvsk')\n\n mu1 = np.mean(data1)\n sigma1 = np.std(data1)\n mu2 = np.mean(data2)\n sigma2 = np.std(data2)\n\n\n plt.bar(bin_edges[:-1], hist1, width=1, align='edge', color='blue', alpha=0.5, label=label1)\n plt.bar(bin_edges[:-1], hist2, width=1, align='edge', color='red', alpha=0.5, label=label2)\n plt.plot(x_gev, y1_gev, color='blue')\n plt.plot(x_gev, y2_gev, color='red')\n plt.plot([x_gev[sig20_thres[0][0]], x_gev[sig20_thres[0][0]]], [0,y2_gev[sig20_thres[0][0]]], color='k', lw=1.0)\n plt.plot([x_gev[sig25_thres[0][0]], x_gev[sig25_thres[0][0]]], [0, y2_gev[sig25_thres[0][0]]], color='k', lw=1.0)\n #plt.plot([x_gev[sig35_thres[0][0]], x_gev[sig35_thres[0][0]]], [0, y2_gev[sig35_thres[0][0]]], color='k', lw=1.0)\n plt.plot([mu1, mu1], [0, 1], color='blue', linestyle=':')\n plt.plot([mu2, mu2], [0, 1], color='red', linestyle=':')\n\n plt.ylabel('PDF')\n 
plt.xlabel('Temperature')\n plt.ylim(0, np.max((np.max(hist1),np.max(hist2),np.max(y1_gev),np.max(y2_gev)))+0.02)\n\n plt.legend()\n plt.title('{} {}'.format(site, subset_label))\n\n plt.savefig('{}{}_{}{}.png'.format(config['PLOT_DIR'], site, subset_label, variable), bbox_inches='tight', dpi=200)\n print('Plotted histogram for {}'.format(site))\n\n return", "def height_similarity(h1, h2, condition):\n if h1 and h2 and len(condition) > 0:\n if h2 < condition[0] or h2 > condition[1]:\n return 0.5\n gap = abs(h1 - h2)\n if 0 <= gap and gap < 10:\n return 0.8\n elif 10 <= gap and gap < 20:\n return 0.9\n else:\n return 1.0\n else:\n return 0.0", "def _histogram_intersection_distance(a, b):\n # branching version\n #return np.vstack((a, b)).min(axis=0).sum()\n\n # Non-branching version\n # noinspection PyUnresolvedReferences\n return (a + b - np.abs(a - b)).sum() * 0.5", "def match(desc1, desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n\n dist_ratio = 0.6\n disc1_size = desc1.shape\n\n matchscores = zeros((desc1_size[0]), \"int\")\n desc2t = desc2.T\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t)\n dotprods = 0.9999 * dotprods\n\n indx = argsort(arccos(dotprods))\n\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n\n return matchscores", "def compare_mi_histograms(outfile, infile1, infile2, maxv=None):\n data1 = file_columns_to_list(infile2, 0, 1)\n data2 = file_columns_to_list(infile1, 0, 1)\n # print \"{} values read from {}\\n{} values read from {}\".format(len(data1), infile1, len(data2), infile2)\n # print data1\n # print data2\n tot1 = 0\n tot2 = 0\n maxdiff = [0, 1, 0]\n if maxv:\n data1 = add_missing(data1, maxv)\n data2 = add_missing(data2, maxv)\n else:\n data1 = conv_and_reverse(data1)\n data2 = conv_and_reverse(data2)\n\n with open(outfile, \"w\") as out:\n out.write(\"#Idx\\tRandom\\tReal\\tDiff\\tFPR\\t% Diff\\n\")\n for i in range(len(data1)):\n x1 = data1[i][1]\n x2 = data2[i][1]\n tot1 += x1\n tot2 += x2\n diff = tot2-tot1\n # print \"{}-{} = {} ({})\".format(tot1, tot2, diff, maxdiff)\n if tot2 == 0:\n fpr = 0\n else:\n fpr = 1.0 * tot1 / tot2\n if tot1 == 0:\n pdiff = 0\n else:\n pdiff = 1.0 * diff / tot1\n out.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(data1[i][0], tot1, tot2, diff, fpr, pdiff))\n # raw_input()\n if diff > maxdiff[0]:\n maxdiff[0] = diff\n maxdiff[1] = data1[i][0]\n maxdiff[2] = fpr\n return maxdiff", "def get_ODER(A,B):\n\n # mean area of raters\n MTA = (np.sum(A) + np.sum(B))/2.\n\n # intersection of outlines\n intersect = np.multiply(A,B)\n\n # regions in A\n labels_A = skm.label(A)\n\n # regions in B\n labels_B = skm.label(B)\n\n # labels in found in A but also in B\n labels_in_A_and_B = np.unique(np.multiply(intersect, labels_A))\n labels_in_B_and_A = np.unique(np.multiply(intersect, labels_B))\n\n # labels unique in A and unique in B\n labels_only_in_A = np.asarray([ii for ii in np.unique(labels_A) if ii not in labels_in_A_and_B])\n labels_only_in_B = np.asarray([ii for ii in np.unique(labels_B) if ii not in labels_in_B_and_A])\n\n # make sure 0 is not picked up\n labels_in_A_and_B = labels_in_A_and_B[labels_in_A_and_B>0]\n labels_in_B_and_A = labels_in_B_and_A[labels_in_B_and_A>0]\n labels_only_in_A = labels_only_in_A[labels_only_in_A>0]\n labels_only_in_B = labels_only_in_B[labels_only_in_B>0]\n\n # calculate detection error\n # sum of areas only picked up by A plus sum of areas only picked up by B\n DE = 
np.sum([np.sum(labels_A==ii) for ii in labels_only_in_A]) + np.sum([np.sum(labels_B==ii) for ii in labels_only_in_B])\n\n # calculate outline error\n # total difference between union and intersection of the region that was outlines by both\n # = area determined by rater 1 + area determined by rater b - 2 * area determined by both\n # as union is area determined by rater 1 + area determined by rater b - area determined by both\n OE = np.sum([np.sum(labels_A==ii) for ii in labels_in_A_and_B]) + np.sum([np.sum(labels_B==ii) for ii in labels_in_B_and_A]) - 2*np.sum(intersect)\n\n # convert to rates and return\n return OE/MTA, DE/MTA", "def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps", "def computeCriteria(seg1,seg2,mergedSegments,weights):\n criteronScores = [\n profileSim(seg1,[seg2],updatedSpeed),\n directtion(seg1,[seg2],mergedSegments),\n shareNoEdges(seg1,[seg2],mergedSegments)\n ]\n return sum(criteronScores*weights)", "def match_integral_patches(desc1,desc2,threshold=0.5):\n d = -np.ones((len(desc1),len(desc2)))\n for i in range((len(desc1))):\n desc_rows = desc1[i].shape[0]\n desc_columns = desc1[i].shape[1]\n desc_current_length = desc_rows*desc_columns\n desc1_sq = np.power(desc1[i], 2)\n mean_d1 = (desc1[i][desc_rows-1][desc_columns-1]/desc_current_length - desc1[i][0][desc_columns-1]/desc_current_length - desc1[i][desc_rows-1][0]/desc_current_length + desc1[i][0][0]/desc_current_length)\n mean_d1_sq = (desc1_sq[desc_rows-1][desc_columns-1]/desc_current_length - desc1_sq[0][desc_columns-1]/desc_current_length - desc1_sq[desc_rows-1][0]/desc_current_length + desc1_sq[0][0]/desc_current_length)\n std_d1 = mean_d1_sq - (mean_d1**2)\n flatten_desc1 = desc1[i].flatten()\n d1 = (flatten_desc1 - mean_d1) / std_d1\n for j in range((len(desc2))):\n # TODO: desc dimensions must be the same\n desc2_sq = np.power(desc2[j], 2)\n mean_d2 = desc2[j][desc_rows-1][desc_columns-1]/desc_current_length - desc2[j][0][desc_columns-1]/desc_current_length - desc2[j][desc_rows-1][0]/desc_current_length + desc2[j][0][0]/desc_current_length\n mean_d2_sq = desc2_sq[desc_rows-1][desc_columns-1]/desc_current_length - desc2_sq[0][desc_columns-1]/desc_current_length - desc2_sq[desc_rows-1][0]/desc_current_length + desc2_sq[0][0]/desc_current_length\n std_d2 = mean_d2_sq - (mean_d2**2)\n flatten_desc2 = desc2[j].flatten()\n d2 = (flatten_desc2 - mean_d2) / std_d2\n ncc_value = np.sum(d1 * d2) / (desc_current_length-1)\n if ncc_value > threshold:\n d[i,j] = ncc_value\n ndx = np.argsort(-d)\n matchscores = ndx[:,0]\n return matchscores", "def score_two(rect1, rect2):\n score = 0.0\n avg_width = (rect1[1][0] + rect2[1][0])/2\n avg_x = (rect1[0][0] + rect2[0][0])/2\n vector = np.array([rect2[0][0] - rect1[0][0], rect2[0][1] - rect1[0][1]])\n length = np.sqrt(np.dot(vector, vector))\n tilt_l = (14.5 - rect1[2])/15\n tilt_r = (14.5 + rect2[2])/15\n if length > 0:\n aim = (avg_x - mid_point)/mid_point\n ratio = 0.2 - avg_width / length\n sine = vector[1] / length\n cosine = vector[0] / length\n score += sine * sine\n score += (1 - cosine)\n score += ratio * ratio\n score += aim * aim\n score += tilt_l 
* tilt_l\n score += tilt_r * tilt_r\n return score", "def compare(cls, data_hist, ref_hist, tolerance):\n dmean = abs(data_hist.GetMean() - ref_hist.GetMean())\n dwidth = abs(data_hist.GetRMS() - ref_hist.GetRMS())\n score = 70.0 * (dmean < abs(0.3*ref_hist.GetRMS()))\n score += 30.0 * (dwidth < abs(tolerance*ref_hist.GetRMS()))\n if score > 70.0: # both passes: 100\n level = ERROR_LEVELS.OK\n elif score >= 30.0: # only one passes: 70 or 30\n level = ERROR_LEVELS.WARNING\n else: # both fails: 0\n level = ERROR_LEVELS.ERROR\n debug('score: {}, level: {}'.format(score, level))\n return cls.create_final_dict(Score(score), level)", "def mw_test(n1, n2):\r\n # find smaller sample, defined historically as n2. modify the names so we\r\n # don't risk modifying data outside the scope of the function.\r\n if len(n2) > len(n1):\r\n sn1, sn2 = array(n2), array(n1)\r\n else:\r\n sn1, sn2 = array(n1), array(n2)\r\n # sum the ranks of s2 by using the searchsorted magic. the logic is that we\r\n # use a sorted copy of the data from both groups (n1 and n2) to figure out\r\n # at what index we would insert the values from sample 2. by assessing the\r\n # difference between the index that value x would be inserted in if we were\r\n # doing left insertion versus right insertion, we can tell how many values\r\n # are tied with x. this allows us to calculate the average ranks easily.\r\n data = sorted(hstack([sn1, sn2]))\r\n ssl = searchsorted(data, sn2, 'left')\r\n ssr = searchsorted(data, sn2, 'right')\r\n sum_sn2_ranks = ((ssl + ssr + 1) / 2.).sum()\r\n ln1, ln2 = sn1.size, sn2.size\r\n C = (ln1 * ln2) + (ln2 * (ln2 + 1) / 2.) - sum_sn2_ranks\r\n U = max(C, ln1 * ln2 - C)\r\n # now we calculate the pvalue using the normal approximation and the two\r\n # tailed test. our formula corrects for ties, because in the case where\r\n # there are no ties, the forumla on the bottom of pg 429=the formula on the\r\n # bottom of pg 430.\r\n numerator = (U - ln1 * ln2 / 2.)\r\n # follwing three lines give the T value in the formula on page 430. same\r\n # logic as above; we calculate the left and right indices of the unique\r\n # values for all combined data from both samples, then calculate ti**3-ti\r\n # for each value.\r\n ux = unique(data)\r\n uxl = searchsorted(data, ux, 'left')\r\n uxr = searchsorted(data, ux, 'right')\r\n T = _corr_kw(uxr - uxl).sum()\r\n denominator = sqrt(((ln1 * ln2) / float((ln1 + ln2) * (ln1 + ln2 - 1))) * (((ln1 + ln2) ** 3\r\n - (ln1 + ln2) - T) / 12.))\r\n if denominator == 0:\r\n # Warning: probability of U can't be calculated by mw_test\r\n # because all ranks of data were tied. 
Returning nan as pvalue.\r\n return U, nan\r\n else:\r\n pval = zprob(numerator / float(denominator))\r\n return U, pval", "def get_random_score(bin1,bin2,nsample,bootstrap=1.0):\n totscore=0.0\n isdiagonal=False\n if bin1==bin2:\n isdiagonal=True\n bin1=random.sample(bin1,int(len(bin1)*bootstrap))\n bin2=random.sample(bin2,int(len(bin2)*bootstrap))\n for n in range(0,nsample):\n rbin1=[''.join(random.sample(ali1,len(ali1))) for ali1 in bin1]\n if isdiagonal:\n rbin2=rbin1 #if the two bins are identical, the randomization should also be\n else:\n rbin2=[''.join(random.sample(ali2,len(ali2))) for ali2 in bin2]\n if mixvec:\n vec1=_seqs2vec(rbin1)\n vec2=_seqs2vec(rbin2)\n score=get_subscore_mixvec(vec1,vec2)\n else:\n score=get_subscore_pairwise(rbin1,rbin2)\n totscore+=score\n return totscore/nsample", "def compute_kendall(\n hyp1_scores: list, hyp2_scores: list, dataframe: pd.DataFrame\n) -> (int, list):\n assert len(hyp1_scores) == len(hyp2_scores) == len(data)\n conc, disc = 0, 0\n for i, row in tqdm(data.iterrows(), total=len(data), desc=\"Kendall eval...\"):\n if hyp1_scores[i] > hyp2_scores[i]:\n conc += 1\n else:\n disc += 1\n\n return (conc - disc) / (conc + disc)" ]
[ "0.71725196", "0.65142816", "0.62537", "0.6232226", "0.6183617", "0.61671734", "0.61571336", "0.60737", "0.60692286", "0.5956013", "0.59121007", "0.5891987", "0.58791715", "0.5832929", "0.5825022", "0.5769532", "0.5756441", "0.5727408", "0.5718197", "0.56934917", "0.5683721", "0.5664968", "0.56418645", "0.55996776", "0.5591097", "0.5583803", "0.55732626", "0.5570226", "0.5551903", "0.5542173" ]
0.72700787
0
Evaluates the base attribute value of a person based on features, age, gender, etc.
def attribute(self, attribute):
    """Evaluate the base value of one attribute ("physique"/"phy",
    "sensitivity"/"sns", "agility"/"agi", "mind"/"mnd") from the person's
    age, gender, and features. The result is clamped to a minimum of 1.
    """
    value = 3
    if self.age == "child":
        value -= 1
    if attribute == "physique" or attribute == "phy":
        if self.age == "adult":
            value += 1
        if self.gender == "male":
            value += 1
        elif self.gender == "female":
            value -= 1
    if attribute == "sensitivity" or attribute == "sns":
        if self.age == "child":
            value += 2
        if self.gender == "male":
            value -= 1
        elif self.gender == "female":
            value += 1
    if attribute == "agility" or attribute == "agi":
        if self.age == "child":
            value += 1  # to be equally as high as adult and young
        elif self.age == "elder":
            value -= 1
    if attribute == "mind" or attribute == "mnd":
        if self.age == "elder":
            value += 1
    # Apply any matching modifiers from the person's "blood" feature.
    for feature in self.features:
        if feature.name == "blood":
            for key in feature.modifiers:
                if attribute == key:
                    value += feature.modifiers[key]
    if value < 1:
        value = 1
    return value
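A minimal sketch of exercising the method above; the SimpleNamespace stubs stand in for whatever person/feature classes the original codebase defines, and their fields are assumptions inferred from the attributes the method reads:

from types import SimpleNamespace

# Hypothetical stand-ins: a "blood" feature granting +1 physique, and an adult male.
blood = SimpleNamespace(name="blood", modifiers={"physique": 1})
person = SimpleNamespace(age="adult", gender="male", features=[blood])

# Call the unbound function directly with the stub as `self`:
# base 3, +1 adult, +1 male, +1 blood modifier -> prints 6
print(attribute(person, "physique"))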
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature", "def evaluate_bias(ds: List[str], predicted: List[GENDER]) -> Dict:\r\n assert (len(ds) == len(predicted)) # must have same length to create tuples\r\n\r\n conf_dict = defaultdict(lambda: defaultdict(lambda: 0))\r\n total = defaultdict(lambda: 0) # increment values if we have any gender\r\n pred_cnt = defaultdict(lambda: 0)\r\n correct_cnt = defaultdict(lambda: 0) # increment values if true_gender == predicted_gender\r\n\r\n count_unknowns = defaultdict(lambda: 0)\r\n\r\n for (gold_gender, word_ind, sent, profession), pred_gender in zip(ds, predicted):\r\n # # IMPORTANTE NOTE :\r\n # need to works with .name of GENDER object for an unknown reason\r\n\r\n if isinstance(pred_gender, str): # can happen in spacy languages 'fr', 'es' or 'it\r\n pred_gender = SPACY_GENDER_TYPES[pred_gender]\r\n\r\n # tuples of values in ds and values in predicted\r\n if pred_gender.name == GENDER.ignore.name:\r\n continue # skip analysis of ignored words\r\n\r\n gold_gender = WB_GENDER_TYPES[gold_gender] # allows Winobias gender type conversion\r\n\r\n if pred_gender.name == GENDER.unknown.name:\r\n count_unknowns[gold_gender] += 1 # increment values for any unknown pred_gender\r\n\r\n profession = profession.lower()\r\n\r\n total[gold_gender] += 1\r\n\r\n if pred_gender.name == gold_gender.name:\r\n correct_cnt[gold_gender] += 1\r\n\r\n pred_cnt[pred_gender.name] += 1\r\n\r\n conf_dict[gold_gender][pred_gender] += 1\r\n\r\n all_total = sum(total.values())\r\n\r\n output_dict = {} # init output dictionnary\r\n # Compute metrics\r\n accuracy = round((sum(correct_cnt.values()) / all_total) * 100, 1) # compute accuracy\r\n output_dict['acc'] = 
accuracy\r\n\r\n if (total[GENDER.male] == 0) | (pred_cnt[GENDER.male.name] == 0): # Avoid ZeroDivisionError\r\n output_dict['f1_male'] = None\r\n else:\r\n recall_male = round((correct_cnt[GENDER.male] / total[GENDER.male]) * 100, 1) # compute metrics for male\r\n prec_male = round((correct_cnt[GENDER.male] / pred_cnt[GENDER.male.name]) * 100, 1)\r\n f1_male = round(calc_f1(prec_male, recall_male), 1)\r\n output_dict['f1_male'] = f1_male\r\n\r\n if (total[GENDER.female] == 0) | (pred_cnt[GENDER.female.name] == 0): # Avoid ZeroDivisionError\r\n output_dict['f1_female'] = None\r\n else:\r\n recall_female = round((correct_cnt[GENDER.female] / total[GENDER.female]) * 100, 1) # calcul metrics for female\r\n prec_female = round((correct_cnt[GENDER.female] / pred_cnt[GENDER.female.name]) * 100, 1)\r\n f1_female = round(calc_f1(prec_female, recall_female), 1)\r\n output_dict['f1_female'] = f1_female\r\n\r\n output_dict['unk_male'] = count_unknowns[GENDER.male]\r\n output_dict['unk_female'] = count_unknowns[GENDER.female]\r\n output_dict['unk_neutral'] = count_unknowns[GENDER.neutral]\r\n\r\n return output_dict", "def baseEvaluate(self, gameState, action):\n features = self.getFeatures(gameState, action)\n weights = self.getWeights(gameState, action)\n return features * weights", "def get_filter(feature, value):\r\n return {\r\n 'gender': {'user__profile__gender': value},\r\n 'level_of_education': {'user__profile__level_of_education': value},\r\n }[feature]", "def base_contribute_score():\n return 1", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def define_gender(name_input):\n if not os.path.isfile('train_set.txt') and not os.path.isfile('test_set'):\n \"\"\"\n We take a sample of male and female names and mix\n them in order to create a training set and testing set\n \"\"\"\n labeled_names = ([(name, 'male') for name in names.words('male.txt')] +\n [(name, 'female') for name in names.words(\n 'female.txt')])\n random.shuffle(labeled_names)\n\n \"\"\"\n We train the classifier and return the gender of the name\n \"\"\"\n featuresets = [(gender_features(n), gender) for (n, gender)\n in labeled_names]\n train_set, test_set = featuresets[-500:], featuresets[:500]\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n with open('train_set.txt', 'wb') as handle:\n pickle.dump(train_set, handle)\n with open('test_set.txt', 'wb') as handle:\n pickle.dump(test_set, handle)\n with open('classifier.txt', 'wb') as handle:\n pickle.dump(classifier, handle)\n\n with open('train_set.txt', 'rb') as handle:\n train_set = pickle.load(handle)\n with open('test_set.txt', 'rb') as handle:\n test_set = pickle.load(handle)\n with open('classifier.txt', 'rb') as handle:\n classifier = pickle.load(handle)\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n# accuracy = nltk.classify.accuracy(classifier, test_set)\n# classifier.show_most_informative_features(10)\n# print accuracy\n\n \"\"\"\n Accuracy: .804\n Most Informative Features\n last_letter = u'a' female : male = 44.0 : 1.0\n last_letter = u'd' male : female = 23.7 : 1.0\n last_two_letters = u'on' male : female = 11.0 : 1.0\n first_two_letters = u'ha' male : female = 7.8 : 1.0\n last_two_letters = u'ta' female : male = 7.0 : 1.0\n last_letter = u't' male : female = 6.7 : 1.0\n last_letter 
= u'o' male : female = 6.0 : 1.0\n last_two_letters = u'll' male : female = 4.7 : 1.0\n first_two_letters = u'te' male : female = 4.7 : 1.0\n last_two_letters = u'an' male : female = 4.1 : 1.0\n \"\"\"\n\n return classifier.classify(gender_features(name_input))", "def evaluate(self, pred, **nargs):\n sse = sum((pred(u, ds=self, **nargs) - (1 if g == \"F\" else 0)) ** 2\n for (u, g) in self.gender_test.items())\n\n ll = 0\n for (u, g) in self.gender_test.items():\n for prn in [pred(u, ds=self, **nargs)]:\n if g == 'F' and prn == 0:\n ll = math.inf\n break\n elif g == 'F':\n ll += math.log(prn, 2)\n elif g == 'M' and prn == 1:\n ll = math.inf\n break\n else:\n ll += math.log(1 - prn, 2)\n\n return (sse, -ll, sse / len(self.gender_test), -ll / len(self.gender_test))", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def testClinicalPatientBMI(self):\n attr = self.session.create_visit_attr()\n\n self.util.floatTypeTest(self, attr, \"bmi\")\n\n self.util.floatPropertyTest(self, attr, \"bmi\")", "def test_classifier_age_estimator(x, class_model):\n return class_model.predict(x)", "def _init_generate_physical_attributes(self):\n # Prepare these now, for speedier access\n config = self.person.cosmos.config\n year = self.person.cosmos.year\n male = self.person.male\n # Determine age of physical peak, i.e., baseball prime\n self.age_of_physical_peak = config.determine_age_of_physical_peak()\n # Determine handedness\n self.lefty = True if random.random() < config.chance_of_being_left_handed else False\n self.righty = not self.lefty\n self.left_handed = 1.0 if self.lefty else 0.0\n self.right_handed = 1.0 if self.righty else 0.0\n # Determine hustle\n self.hustle = config.determine_hustle()\n # Determine adult height this person will attain, in inches\n if male:\n self.adult_height = normal(\n config.adult_male_height_mean(year=year), config.adult_male_height_sd(year=year)\n )\n else:\n self.adult_height = normal(\n config.adult_female_height_mean(year=year), config.adult_female_height_sd(year=year)\n )\n # Determine this person's BMI TODO BMI INCREASES AS ADULTHOOD PROGRESSES\n if male:\n self.bmi = normal(\n config.young_adult_male_bmi_mean(year=year), config.young_adult_male_bmi_sd(year=year)\n )\n else:\n self.bmi = normal(\n config.young_adult_female_bmi_mean(year=year), config.young_adult_female_bmi_sd(year=year)\n )\n # Determine propensities for coordination, reflexes, agility, jumping...\n self.coordination_propensity = config.determine_coordination_propensity()\n self.reflexes_propensity = config.determine_reflexes_propensity(\n coordination_propensity=self.coordination_propensity\n )\n self.agility_propensity = config.determine_agility_propensity()\n self.jumping_propensity = config.determine_jumping_propensity() # Number of inches added/subtracted to base\n # ...and finally 
footspeed propensity, which is a bit more convoluted to compute\n primitive_coordination = config.determine_primitive_coordination(bmi=self.bmi) if self.bmi > 24 else 1.0\n adult_coordination = primitive_coordination * self.coordination_propensity\n primitive_footspeed = config.determine_primitive_footspeed(\n coordination=adult_coordination, height=self.adult_height\n )\n self.footspeed_propensity = config.determine_footspeed_propensity(primitive_footspeed=primitive_footspeed)\n # Finally, fit these potentials to the person's current age\n self.develop()", "def evaluate(self, features, labels):\n raise NotImplementedError('Not implemented')", "def fitness(self):\n return (len(self.body)**2) * self.age", "def _derived_features(self):\n for created_feature, creator in self.feature_creators.items():\n self.parameters[created_feature] = creator(self.parameters)", "def handle_attributes_features(\n\ttffeatures,\n\tvalfeatures,\n\tattributes,\n\tdefault_value=DEFAULT_VAL_IF_NOT_EXIST,\n\tfeature_separator=FEATURE_SEPARATOR\n\t):\n\tfor (key, val) in attributes.items():\n\t\tif (isinstance(val, dict)):\n\t\t\tfor (inner_key, inner_val) in val.items():\n\t\t\t\tfeature_name = ATTRIBUTES + feature_separator + key + feature_separator + inner_key\n\n\t\t\t\tif is_boolean_set(inner_val):\n\t\t\t\t\t# inner feature is true/false feature\n\t\t\t\t\ttffeatures.append(feature_name)\n\t\t\t\telse:\n\t\t\t\t\t# inner feature is val feature\n\t\t\t\t\tinner_val.add(default_value)\n\t\t\t\t\tvalfeatures[feature_name] = inner_val\n\t\telse:\n\t\t\tfeature_name = ATTRIBUTES + feature_separator + key\n\n\t\t\tif is_boolean_set(val):\n\t\t\t\t# feature is true/false feature\n\t\t\t\ttffeatures.append(feature_name)\n\t\t\telse:\n\t\t\t\t# feature is val feature\n\t\t\t\tval.add(default_value)\n\t\t\t\tvalfeatures[feature_name] = val", "def testClinicalPatientAge(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"age\")\n\n self.util.intPropertyTest(self, attr, \"age\")", "def baseline(*args):\n XTrain, XTest, yTrain, yTest = args\n clf = DecisionTreeClassifier(random_state=42)\n clf.fit(XTrain, yTrain)\n return clf.score(XTest, yTest), clf.feature_importances_", "def _init_inherit_physical_attributes(self):\n config = self.person.cosmos.config\n mother, father = self.person.biological_mother, self.person.biological_father\n parents = (mother.body, father.body)\n # Handedness\n if random.random() < config.heritability_of_handedness:\n takes_after = random.choice(parents)\n self.left_handed = Feature(value=takes_after.left_handed, inherited_from=takes_after)\n self.right_handed = Feature(value=takes_after.right_handed, inherited_from=takes_after)\n # Hustle\n if random.random() < config.heritability_of_hustle:\n takes_after = random.choice(parents)\n inherited_hustle = takes_after.hustle\n mutated_hustle = normal(inherited_hustle, config.hustle_mutation_sd)\n self.hustle = Feature(value=mutated_hustle, inherited_from=takes_after)\n else:\n pass # TODO SET UP GENERATING FROM NOTHING", "def decision_function(self, X):\n ...", "def val_acc(self):\n raise Exception(\" not implemented in base model\")", "def evaluate(self, attributes):\n return self.predicate(attributes[self.name])", "def importance(attributes, examples, random=False):\n argmax_a = defaultdict(int)\n # Calculates the following for every attribute: gain(a) = b(p/p+n) - remainder(a)\n for attribute in attributes:\n if random:\n argmax_a[attribute] = random_importance_function()\n continue\n\n # True/False count for 
entire set (p and n)\n p, n, pk, nk = get_true_false_classifications(examples)\n b_ = b((p / (p + n)))\n remainder_a = remainder(attribute, examples)\n argmax_a[attribute] = b_ - remainder_a\n return max(argmax_a.items(), key=itemgetter(1))[0]", "def __init__(self, persona: dict)->None:\n self.gender = persona['gender']\n self.age = persona['age']\n self.hypertension = 1 if 'hypertension' in persona else 0\n self.heart_disease = 1 if 'heart_disease' in persona else 0\n self.ever_married =\"Yes\" if 'ever_married' in persona else \"No\"\n self.work_type = persona['work_type']\n self.Residence_type = persona['Residence_type']\n self.avg_glucose_level = persona['avg_glucose_level']\n self.bmi = persona['bmi']\n self.smoking_status = persona['smoking_status']", "def get_age(self):\n return self.glb[iage]", "def get_titanic_fea(dataset):\n dataset['Name_length'] = dataset['Name'].apply(len)\n\n # Mapping Sex 不在map定义的 就是NaN\n dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n dataset['Has_Cabin'] = dataset['Cabin'].apply(lambda x: 0 if type(x) == float else 1)\n dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1\n\n dataset['IsAlone'] = 0\n dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1\n\n # [Embarked]\n dataset['Embarked'] = dataset['Embarked'].fillna('0')\n dataset['Fare'] = dataset['Fare'].fillna(0)\n # Mapping Embarked\n dataset['Embarked'] = dataset['Embarked'].map({'0': 0, 'S': 1, 'C': 2, 'Q': 3}).astype(int)\n\n # [Fare]\n dataset['CategoricalFare'] = pd.qcut(dataset['Fare'], 4)\n # Mapping Fare\n dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3\n dataset['Fare'] = dataset['Fare'].astype(int)\n\n # [Age]\n age_avg = dataset['Age'].mean()\n age_std = dataset['Age'].std()\n age_null_count = dataset['Age'].isnull().sum()\n age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count)\n dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list\n dataset['Age'] = dataset['Age'].astype(int)\n dataset['CategoricalAge'] = pd.cut(dataset['Age'], 5)\n # Mapping Age\n dataset.loc[dataset['Age'] <= 16, 'Age'] = 0\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n dataset.loc[dataset['Age'] > 64, 'Age'] = 4\n\n # [Name]\n # 称谓 Mr 、Miss 等\n def get_title(name):\n title_search = re.search(' ([A-Za-z]+)\\.', name)\n # If the title exists, extract and return it.\n if title_search:\n return title_search.group(1)\n return \"\"\n dataset['Title'] = dataset['Name'].apply(get_title)\n\n # 只保留4类Title\n dataset['Title'] = dataset['Title'].replace(\n ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')\n # Mapping titles\n title_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = dataset['Title'].fillna(0)\n\n # Feature selection\n drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']\n dataset = 
dataset.drop(drop_elements, axis=1)\n dataset = dataset.drop(['CategoricalAge', 'CategoricalFare'], axis=1)\n\n return dataset", "def compute_feature_properties(self):\n\n self.valuecounts = {}\n self.unique_values = {}\n self.missing_ratios = {}\n self.counts = {}\n self.codemaps = {}\n for f in self.features:\n # Compute various things\n all_values = [self.data[l].get(f,\"?\") for l in self.data]\n missing_data_ratio = all_values.count(\"?\") / (1.0*len(all_values))\n non_q_values = [v for v in all_values if v != \"?\"]\n counts = {}\n for v in non_q_values:\n counts[v] = non_q_values.count(v)\n unique_values = list(set(non_q_values))\n # Sort unique_values carefully.\n # Possibly all feature values are numeric strings, e.g. \"1\", \"2\", \"3\".\n # If we sort these as strings then we get weird things like \"10\" < \"2\".\n # This can actually matter for things like ordinal models.\n # So convert these to ints first...\n if all([v.isdigit() for v in unique_values]):\n unique_values = list(map(int, unique_values))\n unique_values.sort()\n unique_values = list(map(str, unique_values))\n # ...otherwise, just sort normally\n else:\n unique_values.sort()\n self.unique_values[f] = unique_values\n\n N = len(unique_values)\n self.valuecounts[f] = N\n self.missing_ratios[f] = missing_data_ratio\n self.counts[f] = counts\n self.codemaps[f] = self.build_codemap(unique_values)", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"", "def getFitness(individual, X, y):\n\n if(individual.count(0) != len(individual)):\n # get index with value 0\n cols = [index for index in range(\n len(individual)) if individual[index] == 0]\n\n # get features subset\n X_parsed = X.drop(X.columns[cols], axis=1)\n X_subset = pd.get_dummies(X_parsed)\n\n # apply classification algorithm\n #clf = LogisticRegression()\n clf = GaussianNB()\n #clf.fit(X_subset, y)\n #return accuracy_score(y, clf.predict(X_subset), normalize = True)\n return (avg(cross_val_score(clf, X_subset, y, cv=2)),)\n else:\n return(0,)" ]
[ "0.5633302", "0.56305707", "0.5630525", "0.55070186", "0.5478344", "0.54693484", "0.54613477", "0.54573274", "0.5453816", "0.54529595", "0.5422918", "0.5317995", "0.53045845", "0.52880687", "0.5277273", "0.527475", "0.52594405", "0.52482104", "0.5226759", "0.5215583", "0.5201968", "0.5197918", "0.5181106", "0.5175793", "0.51732326", "0.51658297", "0.51527673", "0.514767", "0.51356107", "0.5122425" ]
0.6449075
0
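The classifier fragments in the record above follow the standard NLTK naive-Bayes gender recipe: character features (last letter, first/last two letters) feed NaiveBayesClassifier, and show_most_informative_features prints male:female likelihood ratios like the ones quoted at the top of the record. A minimal self-contained sketch of that pipeline — the exact gender_features keys are an assumption inferred from those ratios, and it assumes the NLTK names corpus has been downloaded:

    import random
    import nltk
    from nltk.corpus import names  # requires nltk.download('names')

    def gender_features(name):
        # Hypothetical character-level features, named to match the
        # ratios printed above (last letter, first/last two letters).
        name = name.lower()
        return {
            'last_letter': name[-1],
            'last_two_letters': name[-2:],
            'first_two_letters': name[:2],
        }

    labeled = ([(n, 'male') for n in names.words('male.txt')] +
               [(n, 'female') for n in names.words('female.txt')])
    random.shuffle(labeled)

    featuresets = [(gender_features(n), g) for n, g in labeled]
    train_set, test_set = featuresets[500:], featuresets[:500]

    classifier = nltk.NaiveBayesClassifier.train(train_set)
    print(nltk.classify.accuracy(classifier, test_set))
    classifier.show_most_informative_features(5)
    print(classifier.classify(gender_features('Neo')))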
Permute the rows of X to minimize error with Y, ignoring signs of the columns. X : numpy.array, input matrix; Y : numpy.array, comparison matrix. Returns: numpy.array, X with permuted rows
def match_rows_sign(X, Y):\n    n, d = X.shape\n    n_, d_ = Y.shape\n    assert n == n_ and d == d_\n\n    # Create a weight matrix comparing the two matrices row-by-row, up to sign\n    W = zeros((n, n))\n    for i, j in it.product(range(n), range(n)):\n        # Cost of 'assigning' row j of X to row i of Y.\n        W[i, j] = min(norm(X[j] - Y[i]), norm(X[j] + Y[i]))\n\n    matching = Munkres().compute(W)\n    matching.sort()\n    _, rowp = zip(*matching)\n    rowp = array(rowp)\n    # Permute the rows of X according to the matching\n    X_ = X[rowp]\n\n    # Flip signs row-wise to minimize the residual against Y\n    for row in range(n):\n        if norm(X_[row] + Y[row]) < norm(X_[row] - Y[row]):\n            X_[row] *= -1\n    return X_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_rows(X, Y):\n n, d = X.shape\n n_, d_ = Y.shape\n assert n == n_ and d == d_\n\n # Create a weight matrix to compare the two\n W = zeros((n, n))\n for i, j in it.product(xrange(n), xrange(n)):\n # Cost of 'assigning' j to i.\n W[i, j] = norm(X[j] - Y[i])\n\n matching = Munkres().compute( W )\n matching.sort()\n _, rowp = zip(*matching)\n rowp = array( rowp )\n # Permute the rows of B according to Bi\n X_ = X[ rowp ]\n\n return X_", "def _diffmat_objective(a,X):\n \n (n,p) = X.shape\n return(X - np.tile(a,(n,1)))", "def minimization(X, Y, r, L, max_iter):\r\n # Define the number of data points and dimension\r\n N = float(X.shape[0])\r\n d = X.shape[1]\r\n # Calculate the number of classes, q\r\n classes, q = [], 0\r\n for i in Y:\r\n if i not in classes:\r\n classes.append(i)\r\n q += 1\r\n # Calculate averages and M matrix\r\n classes_X = [[] for i in range(q)]\r\n for i, h in enumerate(classes):\r\n for j, k in enumerate(Y):\r\n if k == i:\r\n classes_X[i].append(X[j])\r\n mu_g = []\r\n for i, j in enumerate(classes_X):\r\n mu_g.append(np.mean(j, axis=0))\r\n mu = np.mean(X, axis=0)\r\n M = np.zeros((q, d))\r\n for i in range(q):\r\n M[i] = mu_g[i] - mu\r\n # Calculate sigma_xx\r\n Xc = np.transpose(X[0] - mu)\r\n for i in range(1, int(N)):\r\n Xc = np.vstack((Xc, np.transpose(X[i] - mu)))\r\n sigma_xx = np.matmul(Xc.T, Xc) / N\r\n # Calculate the size of each class\r\n Ng = []\r\n for i in classes:\r\n Ng.append((Y == i).sum())\r\n # Calculate the C matrix\r\n C = np.zeros((q, q))\r\n for i in range(q):\r\n C[i, i] = Ng[i] / N\r\n # Calculate C^(-1/2)\r\n C12 = np.zeros((q, q))\r\n for i in range(q):\r\n C12[i, i] = C[i, i] ** -0.5\r\n # Calculate initial theta\r\n Id = np.identity(r)\r\n z = np.zeros((q - r, r))\r\n theta = np.matmul(C12, np.vstack((Id, z)))\r\n # Initialize flags and error tolerance\r\n CONVERGED = False\r\n MAXITER_REACHED = False\r\n eps = 1e-06\r\n # Define constraints\r\n cons = ({'type': 'eq', 'fun': lambda x: constraint_1(x, q, r, C)},\r\n {'type': 'eq', 'fun': lambda x: constraint_2(x, q, r, C)})\r\n # Initialize error\r\n b = np.random.random((d, r))\r\n error = F_lambda(theta, b, q, d, r, M, C, sigma_xx, L)\r\n cost = []\r\n # Initialize counters for individual algorithm iterations\r\n b_it, theta_it = 0, 0\r\n # Run minimization algorithm\r\n for i in range(max_iter):\r\n # Save value of error from the previous iteration\r\n prev_error = error\r\n if i % 2 == 0:\r\n # Minimize b with respect to fixed theta using BFGS\r\n b_flat = b.flatten()\r\n b_min = minimize(lambda x: F_lambda(\r\n theta, x, q, d, r, M, C, sigma_xx, L), b_flat, method='BFGS')\r\n b = np.reshape(b_min.x, (d, r))\r\n b_it += b_min.nit\r\n # Check if rank(Mb) >= r\r\n test = np.linalg.matrix_rank(np.matmul(M, b))\r\n if test < r:\r\n raise Exception(\"Error: rank(Mb) < r\")\r\n else:\r\n # Minimize theta with respect to fixed b using SLSQP\r\n theta_flat = theta.flatten()\r\n theta_min = minimize(lambda x: F_lambda(\r\n x, b, q, d, r, M, C, sigma_xx, L), theta_flat, method='SLSQP',\r\n constraints=cons)\r\n theta = np.reshape(theta_min.x, (q, r))\r\n theta_it += theta_min.nit\r\n # Check if function has been minimized\r\n error = F_lambda(theta, b, q, d, r, M, C, sigma_xx, L)\r\n cost.append(error)\r\n CONVERGED = abs(error - prev_error) < eps\r\n if CONVERGED:\r\n break\r\n # Check if the maximum number of iterations has been reached\r\n MAXITER_REACHED = i == max_iter - 1\r\n if CONVERGED:\r\n print(\"Overall algorithm iterations: %d\" % i)\r\n print(\"Combined number of 
iterations for BFGS: %d\" % b_it)\r\n print(\"Combined number of iterations for SLSQP: %d\" % theta_it)\r\n print(\"Total number of iterations: %d\" % (b_it + theta_it))\r\n elif MAXITER_REACHED:\r\n print(\"The maximum number of iterations has been reached.\")\r\n it = np.arange(0, i + 1)\r\n return b, theta, mu, it, cost", "def _rerank_mp(x_and_ys, shared_inputs):\n x, ys = x_and_ys\n x2ys, x2cnt, x2xs, width, n_trans = shared_inputs\n\n sorted_ys = sorted(ys.items(),\n key=operator.itemgetter(1),\n reverse=True)[:width]\n if x not in x2xs:\n return x, [y for y, score in sorted_ys[:n_trans]]\n\n def _correction(y):\n return sum(\n cntx2 * x2ys[x2][y] / float(x2cnt[x2])\n for x2, cntx2 in x2xs[x].items() if x2 in x2ys and y in x2ys[x2]\n )\n\n y_scores = [(y, cnty - _correction(y)) for y, cnty in sorted_ys]\n y_scores = sorted(y_scores, key=operator.itemgetter(1), reverse=True)\n reranked_ys = [y for y, score in y_scores[:n_trans]]\n return x, reranked_ys", "def ridge_regression(X, Y, alpha=0.2):\n m, n = np.shape(X)\n theta = X.T * X + np.eye(n) * alpha\n if np.linalg.det(theta) == 0:\n print(\"numpy.linalg.linalg.LinAlgError: Singular matrix not reversible\")\n return\n theta = theta.I * X.T * Y\n return theta", "def matrix_regression(Y, X, lmbd=-1, L=-1, symmetric=True, iterations=5000, etol=10e-5, verbose=False):\n # check the dimensions of Y and X\n if Y.shape[1] > X.shape[1]:\n raise ValueError('X must have at least as many columns as Y.')\n if X.shape[0] != X.shape[0]:\n raise ValueError('X and Y must have the same row dimension.')\n if Y.ndim != 2 or X.ndim != 2:\n raise ValueError('X and Y must be matrices.')\n\n # default penalty parameter\n if lmbd <= 0:\n lmbd = 2 * (np.sqrt(Y.shape[1]) + np.sqrt(X.shape[1]) + 1) * (np.sqrt(X.shape[1]) + np.sqrt(X.shape[0]))\n\n # initial guess for solution\n prev_W = symmetrize(np.random.rand(X.shape[1],Y.shape[1]))\n Z = prev_W\n\n # compute Lipschitz constant for optimizer\n if L == -1:\n U, s, V = np.linalg.svd(X.T.dot(X))\n L = s[0]\n\n iters = 0\n err = 1\n alpha = 1\n \n # Implements step 3 of Algorithm 2 of Ji and Ye (2009). Other steps are avoided because we already computed the Lipschitz constant.\n while iters < iterations and err > etol:\n W = gradient_step(Y, X, lmbd, L, Z) # first part of step 3\n prev_alpha = alpha\n alpha = (1 + np.sqrt(1 + 4*(prev_alpha**2)))/2 # second part of step 3, equation (18)\n Z = W + ((prev_alpha - 1)/alpha) * (W - prev_W) # third part of step 3, equation (19)\n \n err = np.abs(prev_W - W).mean() # measure error relative to previous step\n iters += 1\n prev_W = W # update\n\n if iters%100==0 and verbose: print('Iteration {}. Error {}'.format(iters,err))\n \n if verbose: print('Iteration {}. 
Error {}'.format(iters,err))\n if iters == iterations: print('Warning: max iterations hit.')\n \n if symmetric: W = symmetrize(W) # optionally impose constraints on graph\n return W", "def CorrectPerm(X0,X):\n\n PiA = np.dot(lng.inv(np.dot(X0.T,X0)),X0.T)\n Diff = np.dot(PiA,X)\n\n z = np.shape(X)\n\n for ns in range(0,z[1]):\n Diff[ns,:] = abs(Diff[ns,:])/max(abs(Diff[ns,:]))\n\n Xout = np.ones(z)\n\n for ns in range(0,z[1]):\n Xout[:,np.nanargmax(Diff[ns,:])] = X[:,ns]\n\n return Xout,PiA", "def match_columns_sign(X, Y):\n\n return match_rows_sign(X.T, Y.T).T", "def MATSOL(N,A):\r\n\r\n X = np.zeros((N+1),dtype=float) # X.shape = N+1\r\n NROW = np.arange(0,N+1,dtype=int) # NROW.shape = N+1\r\n\r\n for i in np.arange(N): # loop through rows\r\n AMAX = np.max(np.abs(A[NROW[i:],i])) # max value for column, all later rows\r\n ip = np.argmax(np.abs(A[NROW[i:],i]))+i # index of above\r\n \r\n if(abs(AMAX) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n if(NROW[i] != NROW[ip]): # swap rows\r\n NC = NROW[i].copy()\r\n NROW[i] = NROW[ip].copy()\r\n NROW[ip] = NC.copy()\r\n \r\n \r\n COEF = A[NROW[i+1:],i]/A[NROW[i],i] # normalize column values by maximum magnitude value (AMAX > 0)\r\n A[NROW[i+1:],i+1:] = A[NROW[i+1:],i+1:] - np.dot(COEF[:,None],A[NROW[i],i+1:][None,:]) # normalize/reduce matrix\r\n \r\n \r\n if(abs(A[NROW[N],N]) <= 1E-08):\r\n print('Singular matrix --> No unique solution exists')\r\n return X\r\n \r\n X[N] = A[NROW[N],N+1]/A[NROW[N],N] # downstream edge\r\n i = N-1\r\n while (i >= 0):\r\n# SUMM = 0.0\r\n# j = i+1\r\n \r\n SUMM = np.sum(A[NROW[i],i+1:N+1]*X[i+1:N+1]) # do not include final column\r\n \r\n# while (j <= N-1):\r\n# SUMM = A[NROW[i],j]*X[j] + SUMM\r\n# j = j+1\r\n # print(SUMM,SUMM2)\r\n \r\n X[i] = (A[NROW[i],N+1] - SUMM)/A[NROW[i],i]\r\n i = i-1\r\n return X", "def _compute_multipliers_nu(self, X, y):\n n_samples, n_features = X.shape\n\n K = self._gram_matrix(X)\n\n P = cvxopt.matrix(np.outer(y, y) * K)\n q = cvxopt.matrix(np.zeros(n_samples))\n\n # -a_i < = 0\n G_std = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))\n h_std = cvxopt.matrix(np.zeros(n_samples))\n\n # a_i < = 1/n\n G_slack = cvxopt.matrix(np.diag(np.ones(n_samples)))\n h_slack = cvxopt.matrix(np.ones(n_samples) * 1/n_samples)\n\n # np.sum(a) >= 1\n G_nu = cvxopt.matrix(np.ones(1, n_samples))\n h_nu = cvxopt.matrix(self._c)\n\n # Stack contraints vertically to satisfy 0 <= a <= C\n G = cvxopt.matrix(np.vstack((G_std, G_slack, G_nu)))\n h = cvxopt.matrix(np.vstack((h_std, h_slack, h_nu)))\n\n A = cvxopt.matrix(y, (1, n_samples))\n b = cvxopt.matrix(0.0)\n\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n\n # return a list of lagrange multipliers\n return np.ravel(solution['x'])", "def Mat_CorrectPerm(X0,X):\n\n Xout = dp(X)\n\n nX = np.shape(X)\n\n for rx in range(nX[2]):\n for ry in range(nX[3]):\n Xt = X[:,:,rx,ry]\n xx,p=CorrectPerm(X0,Xt)\n Xout[:,:,rx,ry]=xx\n\n return Xout", "def standard_reg(x_arr, y_arr):\n\n x = np.mat(x_arr)\n y = np.mat(y_arr).T\n\n x_squared = x.T * x\n if np.linalg.det(x_squared) == 0.0:\n print(\"Matrix is singular, cannot do inverse.\")\n return\n\n return x_squared.I * (x.T * y)", "def matrixReduction(setHor, setVer, arrayToReduce):\r\n listTemp = []\r\n for i in range(len(setVer)):\r\n listTemp.append(arrayToReduce[setVer[i].index, :])\r\n arrayTemp = numpy.array(listTemp)\r\n listTemp = []\r\n for i in range(len(setHor)):\r\n listTemp.append(arrayTemp[:, setHor[i].index])\r\n result = 
numpy.transpose(numpy.array(listTemp))\r\n\r\n return result", "def inverse_transform(self, X):\n # No warning for y, since there's no y variable.\n # This correpsonds to function signature in scikit-learn's code base\n X = X.copy() # type: pd.DataFrame\n X.loc[:, self._feature_mask_] *= self.scale_\n X.loc[:, self._feature_mask_] += self.min_\n return X", "def compute_assignment(cost_matrix, munkres):\n cost_matrix = np.array(cost_matrix)\n\n if np.any(cost_matrix < 0):\n raise ValueError(\"cost_matrix contains negative values!\")\n\n # Munkres only handles square and rectangular matrices. It does *not* handle irregular matrices!\n # As a workaround the matrix is transposed and corresponding pairs are flipped.\n h, w = cost_matrix.shape\n if h > w:\n low_cost_pairs = np.array(munkres.compute(cost_matrix.T))[:, ::-1]\n else:\n low_cost_pairs = np.array(munkres.compute(cost_matrix))\n\n return low_cost_pairs", "def spearman_coefficient(X, Y):\n # should not need X_norm_squared because if you could precompute that as\n # well as Y, then you should just pre-compute the output and not even\n # call this function.\n if X is Y:\n X = Y = np.asanyarray(X, dtype=[('x', 'S30'), ('y', float)])\n else:\n X = np.asanyarray(X, dtype=[('x', 'S30'), ('y', float)])\n Y = np.asanyarray(Y, dtype=[('x', 'S30'), ('y', float)])\n\n if X.shape[1] != Y.shape[1]:\n raise ValueError(\"Incompatible dimension for X and Y matrices\")\n\n X.sort(order='y')\n Y.sort(order='y')\n\n result = []\n\n #TODO: Check if it is possible to optimize this function\n i = 0\n for arrayX in X:\n result.append([])\n for arrayY in Y:\n Y_keys = [key for key, value in arrayY]\n\n XY = [(key, value) for key, value in arrayX if key in Y_keys]\n\n sumDiffSq = 0.0\n for index, tup in enumerate(XY):\n sumDiffSq += pow((index + 1) - (Y_keys.index(tup[0]) + 1), 2.0)\n\n n = len(XY)\n if n == 0:\n result[i].append(0.0)\n else:\n result[i].append(1.0 - ((6.0 * sumDiffSq) / (n * (n * n - 1))))\n result[i] = np.asanyarray(result[i])\n i += 1\n\n return np.asanyarray(result)", "def fit(self, X: np.ndarray, y: np.ndarray, weights: np.ndarray) -> None:\n if not len(X) == len(y) == len(weights):\n raise ValueError(\"First dimension of arguments must be equal.\")\n if abs(weights).sum() == 0:\n raise ValueError(\"Weights must not be all 0.\")\n\n best_error = np.inf\n best_indices: Tuple[int, int] = (0, 0)\n for i in range(len(X)):\n for j in range(X.shape[1]):\n left_indices = X[:, j] < X[i, j]\n right_indices = np.logical_not(left_indices)\n left_weights = weights[left_indices]\n right_weights = weights[right_indices]\n left_y = y[left_indices]\n right_y = y[right_indices]\n\n error = (\n left_weights[left_y != -1].sum()\n + right_weights[right_y != -1].sum() # THIS IS CORRECT\n )\n error = error / weights.sum()\n if error < best_error:\n best_error = error\n best_indices = (i, j)\n\n self.threshold = X[best_indices]\n self.feature = best_indices[1]", "def _compute_multipliers(self, X, y):\n n_samples, n_features = X.shape\n\n K = self._gram_matrix(X)\n\n P = cvxopt.matrix(np.outer(y, y) * K)\n q = cvxopt.matrix(-1 * np.ones(n_samples))\n\n # -a_i < = 0\n G_std = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))\n h_std = cvxopt.matrix(np.zeros(n_samples))\n\n # a_i < = c\n G_slack = cvxopt.matrix(np.diag(np.ones(n_samples)))\n h_slack = cvxopt.matrix(np.ones(n_samples) * self._c)\n\n # Stack contraints vertically to satisfy 0 <= a <= C\n G = cvxopt.matrix(np.vstack((G_std, G_slack)))\n h = cvxopt.matrix(np.vstack((h_std, h_slack)))\n\n A = 
cvxopt.matrix(y, (1, n_samples))\n b = cvxopt.matrix(0.0)\n\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n\n # return a list of lagrange multipliers\n return np.ravel(solution['x'])", "def isotonic_regression(Y):\r\n\tres = np.ones_like(Y)*Y.sum()/(Y.shape[0]*Y.shape[1])\r\n\ts = np.zeros(Y.shape, dtype=np.float)\r\n\tb = np.zeros(Y.shape, dtype=np.int)\r\n\tu = np.zeros(Y.shape, dtype=np.int)\r\n\tpoints = []\r\n\tfor i in range(Y.shape[0]):\r\n\t\trow = []\r\n\t\tfor j in range(Y.shape[1]):\r\n\t\t\trow.append([i,j])\r\n\t\tpoints.append(tuple(row))\r\n\tdata = Y - res\r\n\tpartition(data, s, b, u, res, points, Y.shape[0]*Y.shape[1], 20)\r\n\treturn res", "def _mat_mat_corr_sparse(\n X: csr_matrix,\n Y: np.ndarray,\n) -> np.ndarray:\n n = X.shape[1]\n\n X_bar = np.reshape(np.array(X.mean(axis=1)), (-1, 1))\n X_std = np.reshape(\n np.sqrt(np.array(X.power(2).mean(axis=1)) - (X_bar ** 2)), (-1, 1)\n )\n\n y_bar = np.reshape(np.mean(Y, axis=0), (1, -1))\n y_std = np.reshape(np.std(Y, axis=0), (1, -1))\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\n \"ignore\", r\"invalid value encountered in true_divide\"\n )\n return (X @ Y - (n * X_bar * y_bar)) / ((n - 1) * X_std * y_std)", "def distort_vxs(self):\n self.dvxs = self.vxs.copy()\n self.dvxs[:, 0] += self.perlin()\n self.dvxs[:, 1] += self.perlin()", "def minimum_mean_square_error(x, y):\n x_pseudoinv = np.linalg.pinv(x)\n return x_pseudoinv * x.T * y", "def fit(self, X, Y):\n\n # copy since this will contains the residuals (deflated) matrices\n check_consistent_length(X, Y)\n X = check_array(X, dtype=np.float64, copy=True)\n Y = check_array(Y, dtype=np.float64, copy=True, ensure_2d=False)\n if Y.ndim == 1:\n Y = Y.reshape(-1, 1)\n\n n = X.shape[0]\n p = X.shape[1]\n q = Y.shape[1]\n\n if self.n_components < 1 or self.n_components > p:\n raise ValueError('Invalid number of components: %d' %\n self.n_components)\n # Scale (in place)\n X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (\n _center_scale_xy(X, Y, self.scale))\n # Residuals (deflated) matrices\n Xk = X.copy()\n Yk = Y.copy()\n# STEP 1\n self.x_params = params_initialize(kind=self.x_kind)\n self.y_params = params_initialize(kind=self.y_kind)\n # Results matrices\n# STEP 2\n self.x_scores_ = np.zeros((n, self.n_components))\n self.y_scores_ = np.zeros((n, self.n_components))\n self.x_weights_ = np.zeros((p, self.n_components))\n self.y_weights_ = np.zeros((q, self.n_components))\n self.x_loadings_ = np.zeros((p, self.n_components))\n self.y_loadings_ = np.zeros((q, self.n_components))\n self.n_iter_ = []\n\n # NIPALS algo: outer loop, over components\n# STEP 3\n for k in range(self.n_components):\n if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):\n # Yk constant\n warnings.warn('Y residual constant at iteration %s' % k)\n break\n # 1) weights estimation (inner loop)\n # -----------------------------------\n# STEP 17\n x_weights, y_weights, n_iter_ = \\\n _nipals_twoblocks_inner_loop(\n X=Xk, Y=Yk, max_iter=self.max_iter,\n tol=self.tol, x_kind=self.x_kind, y_kind=self.y_kind,\n x_params=self.x_params, y_params=self.y_params, flag_first_iter=(k == 0),\n learning_rate=self.learning_rate)\n self.n_iter_.append(n_iter_)\n # Forces sign stability of x_weights and y_weights\n # Sign undeterminacy issue from svd if algorithm == \"svd\"\n # and from platform dependent computation if algorithm == 'nipals'\n x_weights, y_weights = svd_flip(x_weights, y_weights.T)\n y_weights = y_weights.T\n # compute scores\n \n Xk_hat = f(Xk, kind=self.x_kind, 
params=self.x_params)\n Yk_hat = f(Yk, kind=self.y_kind, params=self.y_params)\n \n x_scores = np.dot(Xk_hat, x_weights)\n y_ss = np.dot(y_weights.T, y_weights)\n y_scores = np.dot(Yk_hat, y_weights) / y_ss\n # test for null variance\n if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:\n warnings.warn('X scores are null at iteration %s' % k)\n break\n # 2) Deflation (in place)\n # ----------------------\n # Possible memory footprint reduction may done here: in order to\n # avoid the allocation of a data chunk for the rank-one\n # approximations matrix which is then subtracted to Xk, we suggest\n # to perform a column-wise deflation.\n #\n# STEP 19\n x_loadings = np.dot(Xk_hat.T, x_scores) / np.dot(x_scores.T, x_scores)\n y_loadings = (np.dot(Yk_hat.T, x_scores)\n / np.dot(x_scores.T, x_scores))\n # - regress Xk's on x_score\n # - subtract rank-one approximations to obtain remainder matrix\n# STEP 22\n Xk_hat -= np.dot(x_scores, x_loadings.T)\n # - regress Yk's on x_score, then subtract rank-one approx.\n# STEP 23\n Yk_hat -= np.dot(x_scores, y_loadings.T)\n# STEP 24\n Xk = finv(Xk_hat, kind=self.x_kind, params=self.x_params)\n Yk = finv(Yk_hat, kind=self.y_kind, params=self.y_params)\n # 3) Store weights, scores and loadings # Notation:\n self.x_scores_[:, k] = x_scores.ravel() # T\n self.y_scores_[:, k] = y_scores.ravel() # U\n self.x_weights_[:, k] = x_weights.ravel() # W\n self.y_weights_[:, k] = y_weights.ravel() # C\n self.x_loadings_[:, k] = x_loadings.ravel() # P\n self.y_loadings_[:, k] = y_loadings.ravel() # Q\n # Such that: X = TP' + Err and Y = UQ' + Err\n\n # 4) rotations from input space to transformed space (scores)\n # T = X W(P'W)^-1 = XW* (W* : p x k matrix)\n # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)\n self.x_rotations_ = np.dot(\n self.x_weights_,\n pinv2(np.dot(self.x_loadings_.T, self.x_weights_),\n check_finite=False))\n if Y.shape[1] > 1:\n self.y_rotations_ = np.dot(\n self.y_weights_,\n pinv2(np.dot(self.y_loadings_.T, self.y_weights_),\n check_finite=False))\n else:\n self.y_rotations_ = np.ones(1)\n\n # Estimate regression coefficient\n # Regress Y on T\n # Y = TQ' + Err,\n # Then express in function of X\n # Y = X W(P'W)^-1Q' + Err = XB + Err\n # => B = W*Q' (p x q)\n self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)\n # self.coef_ = self.coef_ * self.y_std_\n return self", "def cross_validation(X, Y):\n m, n = np.shape(X)\n ridge_weights, alphas = generate_parameters(X[:500], Y[:500])\n _, X_mean, X_std = feature_normalize(X[:500])\n X_test = (X[500:] - X_mean) / X_std\n for i, weight in enumerate(ridge_weights):\n # [:, np.newasix] change (3,) to (3,1)\n error = np.power(X_test * weight[:, np.newaxis] - Y[500:], 2).sum()\n print('alpha {}, test error is {}'.format(alphas[i], error))", "def linear_regression_invertible(X, y):\n #####################################################\n # TODO 3: Fill in your code here #\n #####################################################\n w = None\n xtx = np.dot(X.T, X)\n while True:\n eigen= np.absolute(np.linalg.eigvals(xtx))\n if np.min(eigen) < 0.00001:\n xtx = np.add(xtx, np.identity(len(xtx))*0.1)\n else:\n w = np.dot(np.dot(np.linalg.inv(xtx), X.T), y)\n break\n return w", "def reweigh(X, y, S):\n\n\tX['label'] = y\n\n\tW = pd.DataFrame({'group': [1, 1, 0, 0], 'label': [1, 0, 1, 0]})\n\n\t# Calculate weight for each combination of sensitive attribute and class,\n\t# given by the expected probability of an example being in a certain group\n\t# and class if sensitive attribute/class are independent, 
divided by the\n\t# observed probability\n\tweights = [[len(X[X[S] == s]) * len(X[X['label'] == c]) \\\n\t\t\t\t/ float(len(X) * len(X[(X[S] == s) & (X['label'] == c)])) \\\n\t\t\t\tfor c in [1, 0]] for s in [1, 0]]\n\n\tW['weight'] = [i for j in weights for i in j]\n\t\n\tX_prime = X.copy()\n\tX_prime['weight'] = 0\n\n\t# Add weights according to class/group\n\tfor s in [1, 0]:\n\t\tfor c in [1, 0]:\n\t\t\tw = W.loc[(W['group'] == s) & (W['label'] == c), 'weight']\n\t\t\tX_prime.loc[(X[S] == s) & (X['label'] == c), 'weight'] = w.iloc[0]\n\n\tX.drop('label', axis = 1, inplace = True)\n\ty_prime = X_prime['label'].tolist()\n\tX_prime = X_prime.drop('label', axis = 1)\n\n\treturn(X_prime, y_prime)", "def inv_IpXY(X, Y):\r\n d1 = X.shape[0]\r\n d2 = X.shape[1]\r\n if d1 > d2:\r\n M = np.eye(d1) - np.dot(np.dot(X, LA.inv(np.eye(d2) + np.dot(Y, X))), Y)\r\n else:\r\n M = LA.inv(np.eye(d1) + np.dot(X, Y))\r\n return M", "def _optimize(self, X, y, W, steps):\n\n X = X.flatten(1)\n\n min_x, max_x = X.min(), X.max()\n len_x = max_x - min_x\n \n bestd = 1\n bestp = min_x\n minerr = W.sum()\n\n if len_x > 0.0:\n for p in np.arange(min_x, max_x, len_x/steps):\n for d in [-1, 1]:\n gy = np.ones((y.size))\n gy[X*d < p*d] = -1\n err = np.sum((gy != y)*W)\n if err < minerr:\n minerr = err\n bestd = d\n bestp = p\n\n return minerr, bestd, bestp", "def forward_substitution(self):\r\n for col in range(0, self.SIZE):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col + 1, self.SIZE):\r\n self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result", "def linear_regression_noreg(X, y):\n X_trans = X.transpose()\n inverse = np.linalg.inv(np.dot(X_trans, X))\n w = inverse.dot(X_trans).dot(y)\n return w" ]
[ "0.5769132", "0.5621818", "0.5586707", "0.54072565", "0.5397549", "0.5358403", "0.5319366", "0.5261717", "0.5255339", "0.52463937", "0.5175605", "0.5172019", "0.516699", "0.5155215", "0.51417834", "0.5139273", "0.5113438", "0.5082017", "0.5073561", "0.5043652", "0.502834", "0.50264645", "0.5013754", "0.50127375", "0.49776477", "0.4976712", "0.49661064", "0.49659896", "0.49547422", "0.49432433" ]
0.6251233
0
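The match_rows_sign record above solves a sign-invariant assignment problem: build a cost matrix whose (i, j) entry is the smaller of ||X[j] - Y[i]|| and ||X[j] + Y[i]||, find a minimum-cost row matching with the Hungarian algorithm, then flip signs row by row. The record uses the munkres package; the sketch below expresses the same logic with SciPy's linear_sum_assignment — a deliberate substitution for illustration, not what the record's author used:

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    def match_rows_sign(X, Y):
        n = X.shape[0]
        assert X.shape == Y.shape
        # W[i, j]: cost of assigning row j of X to row i of Y, up to sign.
        W = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                W[i, j] = min(np.linalg.norm(X[j] - Y[i]),
                              np.linalg.norm(X[j] + Y[i]))
        # Hungarian algorithm: Y's row i is matched to X's row col[i].
        _, col = linear_sum_assignment(W)
        X_ = X[col]  # fancy indexing returns a copy, so X is untouched
        # Flip signs row-wise to minimize the residual against Y.
        for i in range(n):
            if np.linalg.norm(X_[i] + Y[i]) < np.linalg.norm(X_[i] - Y[i]):
                X_[i] = -X_[i]
        return X_

As a sanity check, match_rows_sign(Y[perm] * flips[:, None], Y) should recover Y (up to ties in the cost matrix) for any permutation perm and any vector of ±1 flips.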
Requests a new IP for the device
def request_new_ip(self, mac): self.execute_script('new_ip', mac)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def purchase_ip(self, debug=False):\n json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')\n json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)\n try:\n ip = Ip()\n ip.ip_addr = json_obj['Value']['Value']\n ip.resid = json_obj['Value']['ResourceId']\n return ip\n except:\n raise Exception('Unknown error retrieving IP.')", "def ip(self, request):\n ip = get_real_ip(request) or 'undefined'\n debug_logger.debug(\"IP request from : \" + ip)\n return Response({'ip': ip})", "def get_new_ip(self):\n attempts = 0\n\n while True:\n if attempts == self.new_ip_max_attempts:\n raise TorIpError(\"Failed to obtain a new usable Tor IP\")\n\n attempts += 1\n\n try:\n current_ip = self.get_current_ip()\n except (RequestException, TorIpError):\n self._obtain_new_ip()\n continue\n\n if not self._ip_is_usable(current_ip):\n self._obtain_new_ip()\n continue\n\n self._manage_used_ips(current_ip)\n break\n\n return current_ip", "def new_ip(self, ip):\n if not ip in self.ip_list:\n self.ip_list.add(ip)\n host = self.hs.id_to_object(ip)\n host.add_tag('sniffer')\n host.save()\n print_success(\"New ip address: {}\".format(ip))", "def create(self, ip): # pylint: disable=invalid-name\n return self.request(\"POST\", data={\"ip\": ip})", "def get_ip(self):", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def new_ip(address):\n return IPy.IP(address)", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def newSDDCPublicIP(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n ip_id = kwargs['ip_id']\n json_data = {\n \"display_name\" : ip_id \n }\n json_response_status_code = put_sddc_public_ip_json(proxy, sessiontoken, ip_id, json_data)\n if json_response_status_code == 200:\n print(f'Public IP {ip_id} successfully updated.')\n params = {'proxy':proxy, 'sessiontoken':sessiontoken}\n getSDDCPublicIP(**params)\n else:\n print(\"Issues updating the IP - please check your syntax and try again.\")\n sys.exit(1)", "def test_try_create_auto_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_auto_net_free.json'\n\n # Does get request\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(201, response.status_code)\n\n url = prepare_url('/api/v3/ipv4/%s/' % response.data[0]['id'],\n fields=['ip_formated'])\n response = self.client.get(\n url,\n content_type='application/json')\n\n self.compare_status(200, response.status_code)\n self.compare_values('10.0.1.2', response.data['ips'][0]['ip_formated'])", "def get(self, url: str) -> None:\n\n if self.number_of_requests_made % self.change_ip_after == 0:\n self.renew_ip()\n\n else:\n self.wait()\n\n self.last_call_timestamp = round(time.time(), 2)\n super().get(url)\n\n self.number_of_requests_made += 1", "def set_ipaddress(modulo):\n\n print ('Configuring IP address...')\n\n modulo.write('AT+NETOPEN\\r\\n'.encode())\n\n if _valid_net(modulo): \n try:\n 
modulo.write('AT+IPADDR\\r\\n'.encode())\n time.sleep(0.1)\n except serial.SerialException:\n print ('... Whitout IP address, try again')\n if _valid_ip(modulo):\n print ('IP address configurated')\n else:\n print ('IP not configurated')\n else:\n print ('Net Already configurated')\n \n data = _read_line(modulo)\n return data", "def change_ip(self, address: int) -> None:\n self.regs[\"ip\"].write(address)", "def ip(self, ip):\n self._ip = ip\n return self", "def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def random_ip():\n return new_ip(\"%i.%i.%i.%i\" % (randint(1, 254), # nosec\n randint(1, 254), # nosec\n randint(1, 254), # nosec\n randint(1, 254))) # nosec", "def ip_register(self, ip=None):\n if ip is None:\n self.request('/v1.1/register', 'POST')\n else:\n self.request('/v1.1/register/%s' % ip, 'POST')", "def ip_assign(ip_info, server, notes=\"\", usage=\"Dedicated IP\"):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # build POST request data\n rdata = {\n 'id': ip_info['id'],\n 'ip': ip_info['ip'],\n 'net': ip_info['net'],\n 'host': \"\",\n 'server': server,\n 'notes': notes,\n 'usage': usage,\n 'uname': udata.userauth['user']\n }\n\n # send request for IP\n vpx = sc.post('https://imhsc.imhadmin.net/modules/IPManager/ipm_ipedit.php', data=rdata)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"lxml\")\n\n # get assignment results\n o_ip = bs.find_all('tr')[0].find_all('td')[1].string\n o_netmask = bs.find_all('tr')[1].find_all('td')[1].string\n o_gateway = bs.find_all('tr')[2].find_all('td')[1].string\n\n if o_ip != ip_info['ip']:\n print(\"!! Warning: Requested IP does not match IP from assignment response (%s)\" % (o_ip))\n\n return {'ip': o_ip, 'netmask': o_netmask, 'gateway': o_gateway}", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def allocate_external_ip(self) -> dto.ExternalIp:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def allocate_floating_ip(self, context):\n # NOTE(vish): We don't know which network host should get the ip\n # when we allocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n return rpc.call(context,\n FLAGS.network_topic,\n {'method': 'allocate_floating_ip',\n 'args': {'project_id': context.project_id}})", "def get(self, request):\n content = {'ip': socket.gethostbyname(socket.gethostname())}\n return Response(content)", "def known_ip(ip=DEFAULT_IP):\r\n tunnel(ip)", "def gen_ip(self):\n\n try:\n self.ip = self.auth_url.split(\":\")[1].strip(\"//\")\n except Exception:\n self.ip = socket.gethostbyname(socket.gethostname())\n print \"\\t! Error obtaining ip address from cred file. 
Using %s\" % (self.ip)", "def set_static_ip_address(self, context, msg):\n args = jsonutils.loads(msg)\n macaddr = args.get('mac')\n ipaddr = args.get('ip')\n LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', (\n {'mac': macaddr, 'ip': ipaddr}))\n\n # Add the request into queue for processing.\n event_type = 'cli.static_ip.set'\n payload = {'mac': macaddr, 'ip': ipaddr}\n timestamp = time.ctime()\n data = (event_type, payload)\n pri = self.obj.PRI_LOW_START\n self.obj.pqueue.put((pri, timestamp, data))\n LOG.debug('Added request to add static ip into queue.')\n\n return 0", "def getip(self):\n if configIpAddress == \"none\":\n strngtoXmit = 'M-SEARCH * HTTP/1.1' + '\\r\\n' + \\\n 'HOST: 239.255.255.250:1900' + '\\r\\n' + \\\n 'MAN: \"ssdp:discover\"' + '\\r\\n' + \\\n 'MX: 2' + '\\r\\n' + \\\n 'ST: urn:schemas-upnp-org:device:MediaRenderer:1' + '\\r\\n' + '\\r\\n'\n\n bytestoXmit = strngtoXmit.encode()\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(3)\n gotstr = 'notyet'\n found = False\n ipaddress = None\n sock.sendto(bytestoXmit, ('239.255.255.250', 1900))\n try:\n gotbytes, addressport = sock.recvfrom(512)\n gotstr = gotbytes.decode()\n except:\n sock.sendto(bytestoXmit, ('239.255.255.250', 1900))\n if re.search('LG', gotstr):\n ipaddress, _ = addressport\n found = True\n self._state = STATE_PLAYING\n else:\n gotstr = 'notyet'\n sock.close()\n if not found:\n print(\"LG TV not found\")\n ipaddress = None\n self._state = STATE_OFF\n lgtv[\"ipaddress\"] = ipaddress\n else:\n lgtv[\"ipaddress\"] = configIpAddress\n if self.isOnline():\n self._state = STATE_PLAYING\n else:\n self._state = STATE_OFF", "def get_device_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n ip = sock.getsockname()[0]\n sock.close()\n return ip" ]
[ "0.64938843", "0.64519453", "0.6445656", "0.64216715", "0.62110883", "0.6202511", "0.61524814", "0.60697424", "0.6024123", "0.59993225", "0.5996937", "0.59684306", "0.5909357", "0.58596236", "0.583989", "0.58333933", "0.58167917", "0.58154434", "0.5806481", "0.579902", "0.5723828", "0.5722472", "0.5716659", "0.57039213", "0.5690786", "0.56731266", "0.56713223", "0.5667182", "0.565684", "0.5650928" ]
0.7935096
0
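Both this record and the next one wrap device actions behind execute_script(name, *args), keeping the controller's Python thin while the testbed logic lives in external scripts. A hypothetical sketch of such a dispatcher — the scripts/ directory layout and .sh suffix are assumptions, not part of the record:

    import subprocess

    def execute_script(name, *args):
        # Run scripts/<name>.sh with the given arguments and raise
        # CalledProcessError on a non-zero exit code.
        cmd = ['./scripts/{}.sh'.format(name)] + [str(a) for a in args]
        subprocess.run(cmd, check=True)

    # e.g. force a DHCP renewal for one device:
    # execute_script('new_ip', '00:11:22:33:44:55')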
Change DHCP response time for device MAC
def change_dhcp_response_time(self, mac, time): self.execute_script('change_dhcp_response_time', mac, time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop_dhcp_response(self, mac):\n self.change_dhcp_response_time(mac, -1)", "def dhcp(self, dhcp):\n\n self._dhcp = dhcp", "def dhcp_utilization(self, dhcp_utilization):\n\n self._dhcp_utilization = dhcp_utilization", "def dhcp_callback(self, state, target_mac=None, target_ip=None, exception=None):\n self.record_result('dhcp', info=target_mac, ip=target_ip, state=state, exception=exception)\n self.target_mac = target_mac\n self.target_ip = target_ip\n if exception:\n self._state_transition(_STATE.ERROR, _STATE.DHCP)\n self.runner.target_set_error(self.port_set, exception)\n else:\n self._state_transition(_STATE.BASE, _STATE.DHCP)", "def config_dhcpv6_options(ssh_conn_obj, ztp_params, config_params, options=dict(), cli_type=\"\"):\n cli_type = st.get_ui_type(config_params.dut, cli_type=cli_type)\n retry_count = config_params.retry_count if \"retry_count\" in config_params and config_params.retry_count else 0\n iteration = config_params.iteration if \"iteration\" in config_params and config_params.iteration else 300\n delay = config_params.delay if \"delay\" in config_params and config_params.delay else 3\n expect_reboot = True if \"expect_reboot\" in options and options [\"expect_reboot\"] else False\n st.log(config_params)\n if \"func_name\" in config_params:\n syslog_file_names = [\"syslog_1_{}\".format(config_params.func_name), \"syslog_{}\".format(config_params.func_name)]\n if \"json_content\" in config_params:\n file_path = basic_obj.write_to_json_file(config_params.json_content)\n st.log(file_path)\n if file_path:\n destination_path = \"{}{}/{}\".format(config_params.home_path, ztp_params.config_path, config_params.ztp_file)\n st.log(destination_path)\n basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)\n config_params.option_59_url = \"http://[{}]{}/{}\".format(config_params.static_ip, ztp_params.config_path, config_params.ztp_file)\n config_params.search_pattern = r'\\s*option\\s+dhcp6.boot-file-url\\s+\"\\S+\";'\n write_option_59_to_dhcp_server(ssh_conn_obj, config_params)\n basic_obj.service_operations(ssh_conn_obj, config_params.dhcp6_service_name, \"restart\", \"server\")\n if not verify_dhcpd_service_status(ssh_conn_obj, config_params.dhcpd6_pid):\n st.log(\"{} service is running which is not expected\".format(config_params.dhcp6_service_name))\n st.report_fail(\"service_running_not_expected\", config_params.dhcp6_service_name)\n reboot_type = config_params.reboot_type if \"reboot_type\" in config_params and config_params.reboot_type else \"normal\"\n if \"ztp_operation\" in config_params:\n config_params.ztp_operation = \"reboot\" if cli_type == \"klish\" else config_params.ztp_operation\n if config_params.ztp_operation == \"reboot\":\n basic_obj.remove_file(config_params.dut, config_params.config_db_path)\n st.reboot(config_params.dut, reboot_type, skip_port_wait=True)\n elif config_params.ztp_operation == \"run\":\n ztp_operations(config_params.dut, config_params.ztp_operation)\n else:\n st.log(\"ZTP operation is not mentioned hence rebooting the device ...\")\n basic_obj.remove_file(config_params.dut, config_params.config_db_path)\n st.reboot(config_params.dut, reboot_type, skip_port_wait=True)\n if \"reboot_on_success\" in options and options[\"reboot_on_success\"]:\n result = verify_ztp_status(config_params.dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=options[\"reboot_on_success\"], cli_type=cli_type)\n else:\n result = verify_ztp_status(config_params.dut, retry_count, 
iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)\n if not result:\n if \"logs_path\" in config_params and \"func_name\" in config_params:\n capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n if \"reboot_on_success\" in options and options[\"reboot_on_success\"]:\n reboot_obj.config_reload(config_params.dut)\n st.wait(5)\n if not ip_obj.ping(config_params.dut, config_params.static_ip, family=\"ipv6\"):\n st.log(\"Pinging to DHCP server failed from DUT, issue either with DUT or server\")\n # intf_obj.enable_dhcp_on_interface(config_params.dut, config_params.network_port, \"v6\")\n if not verify_ztp_status(config_params.dut, retry_count, iteration, delay, cli_type=cli_type):\n if \"logs_path\" in config_params and \"func_name\" in config_params:\n capture_syslogs(config_params.dut, config_params.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n verify_ztp_filename_logs(config_params.dut, config_params)\n if \"ztp_log_string\" in config_params and config_params.ztp_log_string:\n if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path, config_params.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(config_params.ztp_log_path, config_params.ztp_log_string))\n if not basic_obj.poll_for_error_logs(config_params.dut, config_params.ztp_log_path_1, config_params.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(config_params.ztp_log_path_1, config_params.ztp_log_string))\n st.report_fail(\"ztp_log_verification_failed\", config_params.ztp_log_path_1, config_params.ztp_log_string)\n if \"result\" in config_params and config_params.result == \"pass\":\n st.report_pass(\"test_case_passed\")", "def dhcp_used(self, dhcp_used):\n\n self._dhcp_used = dhcp_used", "def renew_dhcp_lease(self):\n\t\tresponse = os.system(\"/sbin/dhclient -r;/sbin/dhclient\")\n\t\tif response != 0:\n\t\t\tprint \"Network restart failed. 
DHCP Lease failed.\"", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def config_and_verify_dhcp_option(ssh_conn_obj, dut, ztp_params, data, expect_reboot=False, reboot_on_success=list(), cli_type=\"\"):\n cli_type = st.get_ui_type(dut,cli_type=cli_type)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n retry_count = data.retry_count if \"retry_count\" in data and data.retry_count else 0\n iteration = data.iteration if \"iteration\" in data and data.iteration else 300\n delay = data.delay if \"delay\" in data and data.delay else 3\n if \"func_name\" in data:\n syslog_file_names = [\"syslog_1_{}\".format(data.func_name), \"syslog_{}\".format(data.func_name)]\n # basic_obj.copy_config_db_to_temp(dut, data.config_db_path, data.config_db_temp)\n if \"config_file_type\" in data and data.config_file_type == \"text\":\n file_path = \"/tmp/file_temp.json\"\n basic_obj.write_to_file(ssh_conn_obj, data.json_content, file_path, device=\"server\")\n elif \"config_file_type\" in data and data.config_file_type == \"EoL\":\n file_path = \"\"\n else:\n file_path = basic_obj.write_to_json_file(data.json_content)\n if file_path:\n destination_path = \"{}{}/{}\".format(ztp_params.home_path, ztp_params.config_path, data.config_file)\n basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)\n if \"config_db_location\" in data and data.config_db_location == \"json\":\n st.download_file_from_dut(dut, data.config_db_temp, file_path)\n destination_path = \"{}{}/{}\".format(ztp_params.home_path, ztp_params.config_path, data.config_db_file_name)\n basic_obj.copy_file_from_client_to_server(ssh_conn_obj, src_path=file_path, dst_path=destination_path)\n if \"scenario\" in data and data.scenario == \"invalid-json\":\n st.log(\"Writing invalid content to make invalid json ...\")\n basic_obj.write_to_file_to_line(ssh_conn_obj, \",\", 5, destination_path, \"server\")\n if data.option_type == \"67\":\n st.log(\"Creating {} file on DHCP server ...\".format(data.config_file))\n data.search_pattern = r'\\s*option\\s+bootfile-name\\s*\\S*\\s*\"\\S+\";'\n data.option_string = \"option bootfile-name\"\n if data.type == \"http\":\n data.option_url = \"http://{}{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"tftp\":\n data.option_url = \"tftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"ftp\":\n data.option_url = \"ftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n write_option_to_dhcp_server(ssh_conn_obj, data)\n basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)\n if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):\n st.log(\"{} service not running\".format(data.dhcp_service_name))\n st.report_fail(\"service_not_running\", data.dhcp_service_name)\n # write_option_67_to_dhcp_server(ssh_conn_obj, data)\n data.device_action = \"reboot\" if cli_type == \"klish\" else data.device_action\n if data.device_action == \"reboot\":\n reboot_type = data.reboot_type if \"reboot_type\" in data and data.reboot_type else \"normal\"\n 
basic_obj.remove_file(dut, data.config_db_path)\n st.reboot(dut, reboot_type, skip_port_wait=True)\n st.wait_system_status(dut, 500)\n elif data.device_action == \"run\":\n ztp_operations(dut, data.device_action)\n if \"band_type\" in data and data.band_type==\"inband\":\n if not basic_obj.poll_for_system_status(dut):\n st.log(\"Sytem is not ready ..\")\n st.report_env_fail(\"system_not_ready\")\n if not basic_obj.check_interface_status(dut, ztp_params.oob_port,\"up\"):\n basic_obj.ifconfig_operation(dut, ztp_params.oob_port, \"down\")\n interface_status = basic_obj.check_interface_status(dut, ztp_params.inband_port, \"up\")\n if interface_status is not None:\n if not interface_status:\n intf_obj.interface_noshutdown(dut, ztp_params.inband_port, cli_type=cli_type)\n if \"service\" in data and data.service == \"disable\":\n basic_obj.service_operations_by_systemctl(dut, \"ztp\", \"stop\")\n if basic_obj.verify_service_status(dut, \"ztp\"):\n st.log(\"ZTP status is not stopped\")\n st.report_fail(\"service_not_stopped\", \"ztp\")\n basic_obj.service_operations_by_systemctl(dut, \"ztp\", \"start\")\n if not poll_ztp_status(dut, [\"IN-PROGRESS\", \"Not Started\", \"SUCCESS\"], cli_type=cli_type):\n st.report_fail(\"ztp_max_polling_interval\")\n if \"check\" in data and data.check == \"not\":\n if verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):\n if \"logs_path\" in data and \"func_name\" in data:\n capture_syslogs(dut, data.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n else:\n st.log(\"Iteration count {}\".format(iteration))\n st.log(\"REBOOT ON SUCCESS - {}\".format(reboot_on_success))\n if reboot_on_success:\n if \"configdb-json\" in reboot_on_success:\n st.wait_system_reboot(dut)\n st.wait_system_status(dut, 300)\n result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, reboot_on_success=reboot_on_success, cli_type=cli_type)\n else:\n result = verify_ztp_status(dut, retry_count, iteration, delay, expect_reboot=expect_reboot, cli_type=cli_type)\n if not result:\n if \"logs_path\" in data and \"func_name\" in data:\n capture_syslogs(dut, data.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n if reboot_on_success:\n output = show_ztp_status(dut, cli_type=cli_type)\n if output[\"status\"] != \"SUCCESS\":\n st.wait(300, \"Waiting for device to reboot after success...\")\n st.wait_system_status(dut, 300)\n # st.wait_system_reboot(dut)\n if not verify_ztp_status(dut, retry_count, iteration, delay, cli_type=cli_type):\n if \"logs_path\" in data and \"func_name\" in data:\n capture_syslogs(dut, data.logs_path, syslog_file_names)\n st.log(\"ZTP status verification failed\")\n st.report_fail(\"ztp_status_verification_failed\")\n st.banner(boot_up_obj.sonic_installer_list(dut))\n verify_ztp_filename_logs(dut, data)\n if \"ztp_log_string\" in data and data.ztp_log_string:\n if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path, data.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(data.ztp_log_path, data.ztp_log_string))\n if not basic_obj.poll_for_error_logs(dut, data.ztp_log_path_1, data.ztp_log_string):\n st.log(\"ZTP log {} verification failed for message {}\".format(data.ztp_log_path_1, data.ztp_log_string))\n st.report_fail(\"ztp_log_verification_failed\", data.ztp_log_path_1, data.ztp_log_string)\n if \"result\" in data and data.result 
== \"pass\":\n st.report_pass(\"test_case_passed\")", "def configure_radius_server_deadtime(device, server_time):\n try:\n device.configure([\n f\"radius-server deadtime {server_time} \"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure radius server deadtime'\n )", "def dhcp_lease_times(self):\n default_lease_time = input('enter a default lease time for dhcp\\n'\n 'default [800]: ')\n default = 800\n default_lease_time = set_values(default_lease_time, default, check='integer')\n max_lease_time = input('enter max lease time for dhcp\\n'\n 'default [7200]: ')\n default = 7200\n max_lease_time = set_values(max_lease_time, default, check='integer')\n logging.info('adding default_lease_time: {} max_lease_time: {}'.format(default_lease_time,\n max_lease_time))\n self.inventory_dict['csah']['vars']['default_lease_time'] = default_lease_time\n self.inventory_dict['csah']['vars']['max_lease_time'] = max_lease_time", "def dhcp_renew(ifname):\n\n logging.debug('Renewing %s DHCP lease...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--rebind', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def udp_timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 300\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_udp_timeout\": seconds})", "def answerDHCP(self, shouldAnswer):\n assert False, \"Deriving class must implement\"", "async def test_dhcp(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n data=dhcp.DhcpServiceInfo(\n ip=\"1.2.3.4\", macaddress=\"01:23:45:67:89:ab\", hostname=\"mock_hostname\"\n ),\n context={\"source\": config_entries.SOURCE_DHCP},\n )\n\n assert result.get(\"type\") == FlowResultType.FORM\n assert result.get(\"step_id\") == \"user\"", "def __init__(self):\n self.dhcp_client_state = store.MacToIP() # mac => DHCP_State", "def elAddNetworkConfigurationWithDhcp(self, device):\n commandSection = self.sectionByName(\"command\")\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n deviceMatch = re.match(r\"([^0-9]+)([0-9])\", device)\n if deviceMatch:\n # e.g. \"eth0\"\n devicePrefix = deviceMatch.group(1)\n deviceNumber = deviceMatch.group(2)\n deviceNumber = int(deviceNumber)\n for i in range(8, deviceNumber - 1, -1):\n deviceI = devicePrefix + str(i)\n deviceIPlus1 = devicePrefix + str(i + 1)\n # move up by one device each network configuration\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--device[ \\t]*(?:=|[ \\t])[ \\t]*)\" + re.escape(deviceI) + r\"(.*)$\",\n r\"\\g<1>\" + deviceIPlus1 + r\"\\g<2>\",\n commandSection.string)\n # not --noipv6\n networkConfiguration = \"network --device=\" + device + \" --bootproto=dhcp --onboot=yes --activate\"\n if deviceMatch and deviceNumber == 0:\n # having configuration of eth0 first appears to be more conducive to overall success,\n # and also, per http://fedoraproject.org/wiki/Anaconda/Kickstart#network, supposedly\n # \"... in installer environment. Device of the first network command is activated if network is required,\n # e.g. 
in case of network installation ...\",\n commandSection.string = networkConfiguration + \"\\n\" \\\n + \"#\\n\" \\\n + commandSection.string\n else:\n commandSection.string = commandSection.string \\\n + \"#\\n\" \\\n + networkConfiguration + \"\\n\"", "def udp_timeout(ctx, seconds):\n config_db = ConfigDBConnector()\n config_db.connect()\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_udp_timeout\": seconds})", "def setMAC( self, intf, mac ):\n result = self.cmd( 'ifconfig', intf, 'down' )\n result += self.cmd( 'ifconfig', intf, 'hw', 'ether', mac )\n result += self.cmd( 'ifconfig', intf, 'up' )\n return result", "async def test_dhcp_renewal_match_hostname_and_macaddress(hass: HomeAssistant) -> None:\n integration_matchers = [\n {\"domain\": \"mock-domain\", \"hostname\": \"irobot-*\", \"macaddress\": \"501479*\"}\n ]\n\n packet = Ether(RAW_DHCP_RENEWAL)\n\n async_handle_dhcp_packet = await _async_get_handle_dhcp_packet(\n hass, integration_matchers\n )\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init:\n await async_handle_dhcp_packet(packet)\n # Ensure no change is ignored\n await async_handle_dhcp_packet(packet)\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.1.120\",\n hostname=\"irobot-ae9ec12dd3b04885bcbfa36afb01e1cc\",\n macaddress=\"50147903852c\",\n )", "def unconfigure_radius_server_deadtime(device, server_time):\n try:\n device.configure([\n f\"no radius-server deadtime {server_time}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not unconfigure radius server deadtime'\n )", "def write_option_to_dhcp_server(connection_obj, data):\n line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)\n option = data.option_string # \"option dhcp6.boot-file-url \"\n option_path = data.option_url\n st.log(\"#####LINE NUMBER{}\".format(line_number))\n option_config = \"'{} \\\"{}\\\";'\".format(option, option_path)\n if int(line_number) > 0:\n # line_number = data.line_number if line_number in data else 60\n basic_obj.delete_line_using_line_number(connection_obj, line_number, data.dhcp_config_file)\n basic_obj.write_to_file(connection_obj, option_config, data.dhcp_config_file, device=\"server\")\n # basic_obj.write_to_file_to_line(connection_obj, option_config, line_number, data.dhcp_config_file, device=\"server\")\n line_number = basic_obj.get_file_number_with_regex(connection_obj, data.search_pattern, data.dhcp_config_file)\n st.log(\"#####LINE NUMBER{}\".format(line_number))\n if line_number <= 0:\n st.log(\"Written content in file {} not found\".format(data[\"dhcp_config_file\"]))\n st.report_fail(\"content_not_found\")", "def set_ntp_sysctl(self):\n print \"Modification du sysctl\"\n self.exec_cmd(\"echo \\\"xen.independent_wallclock = 1\\\" >> %s/etc/sysctl.conf\" % self.rep_vhosts_vm)", "def tcp_time_updatetime(localport):\r\n\r\n # Get the ips and ports of servers hosting time_server.repy, retrying nine\r\n # times if there is an exception.\r\n gotval = False\r\n attemptretrieval = 0\r\n while attemptretrieval < 2:\r\n try:\r\n serveraddresses = advertise_lookup(\"time_server\")\r\n except Exception:\r\n attemptretrieval = attemptretrieval + 1\r\n sleep(2) # Look up the value again in 10 seconds\r\n else:\r\n if serveraddresses != [] and 
serveraddresses[0] != '':\r\n gotval = True\t # Successfully obtained the value\r\n break\r\n else:\r\n attemptretrieval = attemptretrieval + 1\r\n\r\n\r\n if not gotval:\r\n raise Exception(\"Unable to locate any servers running time_server.repy\")\r\n\r\n\r\n timelength = 25 # Max length of string, representing the time, to be received\r\n shuffledserveraddresses = random_sample(serveraddresses,min(len(serveraddresses),5))\r\n\r\n # Open a connection with a random server hosting time_server.repy\r\n timeobtained = False\r\n serverindex = 0\r\n while serverindex < len(shuffledserveraddresses):\r\n remoteaddress = shuffledserveraddresses[serverindex].split(':')\r\n remoteip = remoteaddress[0]\r\n remoteport = int(remoteaddress[1])\r\n\r\n try:\r\n sockobject = timeout_openconn(remoteip,remoteport)\r\n except Exception:\r\n serverindex +=1\r\n else:\r\n timeobtained = True\r\n break\r\n\r\n\r\n if not timeobtained:\r\n raise Exception(\"Unable to open connection with any of the \",len(shuffledserveraddresses),\"servers running time_server.repy.\")\r\n\r\n\r\n currenttime =''\r\n while '$' not in currenttime:\r\n currenttime += sockobject.recv(20)\r\n sockobject.close()\r\n currenttime = float(currenttime[:-1])\r\n\r\n # finally, set the time\r\n time_settime(currenttime)\r\n\r\n return shuffledserveraddresses[serverindex]", "def configure_lldp_holdtime(device, timer):\r\n try:\r\n device.configure(f'lldp holdtime {timer}')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not configure LLDP holdime\"\r\n \"Error: {error}\".format(error=e)\r\n )", "def dhcp_total(self, dhcp_total):\n\n self._dhcp_total = dhcp_total", "def request_new_ip(self, mac):\n self.execute_script('new_ip', mac)", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def unconfigure_lldp_timer(device): \r\n try:\r\n device.configure('no lldp timer')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not unconfigure LLDP timer\"\r\n \"Error: {error}\".format(error=e)\r\n )", "def write_option_67_to_dhcp_server(ssh_conn_obj, data):\n option_67_config = \"option bootfile-name\"\n if data.type == \"http\":\n config_json_url = \"http://{}{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"tftp\":\n config_json_url = \"tftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n elif data.type == \"ftp\":\n config_json_url = \"ftp://{}/{}/{}\".format(data.static_ip, data.config_path, data.config_file)\n option_67_config_string = '{} \"{}\";'.format(option_67_config, config_json_url)\n if not basic_obj.write_update_file(ssh_conn_obj, option_67_config,\n option_67_config_string, data.dhcp_config_file):\n st.log(\"Written content in file {} not found\".format(data.dhcp_config_file))\n st.report_fail(\"content_not_found\")\n basic_obj.service_operations(ssh_conn_obj, data.dhcp_service_name, data.action, data.device)\n if not verify_dhcpd_service_status(ssh_conn_obj, data.dhcpd_pid):\n st.log(\"{} service not running\".format(data.dhcp_service_name))\n st.report_fail(\"service_not_running\", data.dhcp_service_name)" ]
[ "0.68185097", "0.61317915", "0.60052145", "0.5939863", "0.5885967", "0.5811359", "0.58035165", "0.57373697", "0.5646604", "0.5625567", "0.55827594", "0.55206007", "0.5491405", "0.5381941", "0.53621304", "0.5349844", "0.53050417", "0.52728444", "0.52219707", "0.5220993", "0.52162665", "0.51927435", "0.5168878", "0.516402", "0.5163542", "0.5156972", "0.51473045", "0.51466286", "0.51435393", "0.5127612" ]
0.82894284
0
Stops DHCP response for the device
def stop_dhcp_response(self, mac):
    self.change_dhcp_response_time(mac, -1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n\n if not self._dhcp_client_ctrl is None:\n self._dhcp_client_ctrl.exit()\n if not self._slave_dhcp_process is None:\n self._slave_dhcp_process.kill()\n logger.debug('DHCP client stopped on ' + self._ifname)\n \n self._new_lease_event.clear()\n self._dhcp_client_ctrl = None # Destroy the control object\n self._slave_dhcp_process = None # Destroy the slave DHCP object", "def stop(self):\n self.stopped = True\n self.broadcast('host down')", "def stop_advertising(self):\n self._periph.stop_advertising()", "def stop(self,c,ADDR):\r\n if self.device_detected == True:\r\n resp = yield subprocess.check_output(\"cacli STP \" + str(ADDR))\r\n #print resp\r\n else:\r\n resp = \"Device not connected.\"\r\n print \"Device not connected. \"\r\n returnValue(resp)", "def stop_device(self):\n\n self.state = 'stopped'", "def exit(self):\n if self._dbus_iface is None:\n raise Exception('Method invoked on non existing D-Bus interface')\n self._dbus_iface.Release(reply_handler = self._exitUnlock, error_handler = self._exitUnlock) # Call Exit() but ignore whether it gets acknowledged or not... this is because slave process may terminate before even acknowledge\n self._exit_unlock_event.wait(timeout = 5) # Give 5s for slave to acknowledge the Exit() D-Bus method call... otherwise, ignore and continue\n # Once we have instructed the slave to send a Release, we can stop our own D-Bus loop (we won't communicate with the slave anymore)\n # Stop the dbus loop\n if not self._dbus_loop is None:\n self._dbus_loop.quit()\n \n self._dbus_loop = None\n \n logger.debug('Sending Exit() to remote DHCP client')\n self._exit_unlock_event.clear()", "def stop(self):\n\n command = [0x00, 0x00, 0x00, 0x00]\n self.send_command(command)", "def stop(self):\n self.stopping = True\n self.queue_response(exc=ClientError())", "def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=True,\n kill_string=IperfServer.KILL_STRING)\n self.child_pid = None", "def ipsec_down(self, **kwargs):\r\n\r\n conn_name = kwargs.get('connection_name', self.conn_name)\r\n cmd = 'ipsec down ' + conn_name\r\n self.linux_handle.log(\"Stoping ipsec connection : \" + cmd)\r\n result = self.linux_handle.shell(command=cmd).response()\r\n if re.search(r'successfully', result):\r\n self.linux_handle.log(\"ipsec connection closed successful: \" + result)\r\n return True\r\n else:\r\n self.linux_handle.log(level='ERROR', message='ipsec stop connection failed: ' + result)\r\n raise Exception('ipsec stop failed: ' + result)", "def do_stop(self):\n debug(\"CBA4.do_stop()\")\n if (self.__thread and self.__thread.isAlive()):\n self.__thread.stop()\n self.__thread.join(None)\n self.__thread = None\n\n if (self.is_valid()):\n tx = bytearray(16)\n tx[0] = 0x53\n tx[1] = 1\n self.get_status_response(tx)\n #end do_stop()", "def Stop(self):\n if self.child_pid:\n self.data = self.host.Communicate(self.child_pid, echo_error=True,\n kill=(not self.length),\n kill_string=IperfClient.KILL_STRING)\n self.child_pid = None", "def _stop_device(self):\r\n with self._driver_lock:\r\n m = self._lib.ps2000aStop(self._handle)\r\n check_result(m)", "def _ap_stop(self):\n logger.info('Stopping access point')\n call(['service', 'hostapd', 'stop'])\n call(['service', 'dnsmasq', 'stop'])\n\n self._disable_wifi_ap()\n\n logger.info('Access point disabled')", "def stop(self):\n self.udpSock.close()", "def stopwasp():\n\n\trespond = send_command('stopwasp')", "def network_delete_end(self, payload):\n 
self.disable_dhcp_helper(payload['network_id'])", "def stopVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/stop\" % (node,vmid), post_data)\n return data", "def stop_advertisement(self, unit_id):\n \n url = self.domain_ext + 'unit/view/%s/virtasement' % unit_id\n data = {'cancel': 1}\n return self.session.post(url, data=data)", "def request_stop(self):\n self._messaged.emit((\"stop\",None,0,None))", "def stop(self):\n self.halt = True", "def _stop(self, host):\n pass", "def stop(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_stop(self)", "def InterfaceClientStop(self, exitCode=200): \n pass", "def daemonControlStop (self):\n self.stop()", "def get_stop_response():\n\n speech_output = STOP_MESSAGE\n return response(speech_response(speech_output, True))", "def get_stop_response():\n\n speech_output = STOP_MESSAGE\n return response(speech_response(speech_output, True))", "def ec2_stop(resource, metadata):\n instances = resource.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Name', 'Values': [metadata['fqdn']]}, ])\n\n for instance in instances:\n print(\"Terminating vm id {0} name {1}\".format(instance.id, instance.tags[0]['Value']))\n # resource.instances.filter(InstanceIds=[instance.id]).stop()\n resource.instances.filter(InstanceIds=[instance.id]).terminate()", "def stop(self):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_stop(self)", "def detach(target, sysip):\n click.secho(\"Attempting to detach template.\")\n\n payload = {\n \"deviceType\":\"vedge\",\n \"devices\":[ \n {\n \"deviceId\":str(target),\n \"deviceIP\":str(sysip)\n }\n ]\n }\n\n url = base_url + \"/template/config/device/mode/cli\"\n\n response = requests.post(url=url, data=json.dumps(payload), headers=header, verify=False)\n if response.status_code == 200:\n id = response.json()[\"id\"]\n url = base_url + \"/device/action/status/\" + str(id)\n while(1):\n status_res = requests.get(url,headers=header,verify=False)\n if status_res.status_code == 200:\n push_status = status_res.json()\n if push_status['summary']['status'] == \"done\":\n if 'Success' in push_status['summary']['count']:\n print(\"Changed configuration mode to CLI\")\n elif 'Failure' in push_status['summary']['count']:\n print(\"Failed to change configuration mode to CLI\")\n exit()\n break\n else:\n print(\"Failed to detach template with error \" + response.text)\n exit()" ]
[ "0.7163312", "0.631082", "0.6104016", "0.60529596", "0.60524225", "0.6041364", "0.6019232", "0.5972226", "0.592156", "0.5913132", "0.590311", "0.5891132", "0.589016", "0.58840525", "0.58741194", "0.58572394", "0.582723", "0.5790126", "0.57324284", "0.5713386", "0.5644416", "0.56414205", "0.5618038", "0.560242", "0.5601144", "0.55904114", "0.55904114", "0.5576033", "0.5565405", "0.55486965" ]
0.8130744
0
Change DHCP range for devices
def change_dhcp_range(self, start, end, prefix_length):
    self.execute_script('change_dhcp_range', start, end, prefix_length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dhcp_range(options, index):\n second_octet = 160 + index\n return \"192.%s.1.2-192.%s.255.254\" % (second_octet, second_octet)", "def dhcp(self, dhcp):\n\n self._dhcp = dhcp", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def dhcp_utilization(self, dhcp_utilization):\n\n self._dhcp_utilization = dhcp_utilization", "def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")", "def dhcp_used(self, dhcp_used):\n\n self._dhcp_used = dhcp_used", "def change_default_range(networks, number_excluded_ips,\n cut_from_start=True):\n for default_network in filter(\n lambda x: ((x['name'] != 'fuelweb_admin')and\n (x['name'] != 'private')),\n networks):\n default_range = [netaddr.IPAddress(str(ip)) for ip\n in default_network[\"ip_ranges\"][0]]\n if cut_from_start:\n new_range = [default_range[0],\n default_range[0] + number_excluded_ips]\n else:\n new_range = [default_range[0] + number_excluded_ips + 1,\n default_range[1]]\n default_network[\"ip_ranges\"][0] = [str(ip)\n for ip in new_range]", "def setup_dhcp_config(self, board_config):\n raise NotImplementedError", "def set_dhcp_pools(self, cidr):\n start = str(ipaddress.IPv4Network(cidr)[50])\n end = str(ipaddress.IPv4Network(cidr)[200])\n return start, end", "def create_dhcp_pool(options, vsm_obj, range, default_gateway):\n edge = Edge(vsm_obj, '4.0')\n edge_id = get_edge(vsm_obj)\n edge.id = edge_id\n\n dhcp_py_dict = {\n 'enabled': True,\n 'logging': {'loglevel': 'info', 'enable': False},\n 'ippools': [\n {\n 'autoconfiguredns': True,\n 'defaultGateway': default_gateway,\n 'iprange': range,\n }\n ],\n }\n dhcp_client = DHCP(edge)\n print(\"Creating dhcp ippool with range %s\" % range)\n dhcp_schema_object = dhcp_client.get_schema_object(dhcp_py_dict)\n existing_dhcp_schema = dhcp_client.read()\n if existing_dhcp_schema and existing_dhcp_schema.ipPools:\n print \"append dhcp ippool to existing list\"\n dhcp_schema_object.ipPools = existing_dhcp_schema.ipPools + \\\n dhcp_schema_object.ipPools\n result = dhcp_client.create(dhcp_schema_object)\n\n if (result[0].response.status != 204):\n r_vars = vars(result[0])\n print(\"Create IP Pool error: %s\" % result[0].response.reason)\n print ', '.join(\"%s: %s\" % item for item in r_vars.items())\n return False\n return True", "def reconfigure_ml2_vlan_range(self):\n self.check_run('reconfigure_ml2_vlan_range')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('neutron')\n structured_config = get_structured_config_dict(config)\n 
self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n service_name = 'neutron-server'\n uptimes = self.get_service_uptime(controllers, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n self.env.make_snapshot(\"reconfigure_ml2_vlan_range\", is_make=True)", "def new_range(self, ip_range):\n if not ip_range in self.ip_ranges:\n self.ip_ranges.add(ip_range)\n doc = self.rs.id_to_object(ip_range)\n doc.add_tag('sniffer')\n doc.save()\n print_success(\"New ip range: {}\".format(ip_range))", "def set_dhcp_ipaddress(self, sDhcpIPAddress):\n\t\tcall_sdk_function('PrlVirtNet_SetDhcpIPAddress', self.handle, sDhcpIPAddress)", "def elAddNetworkConfigurationWithDhcp(self, device):\n commandSection = self.sectionByName(\"command\")\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n deviceMatch = re.match(r\"([^0-9]+)([0-9])\", device)\n if deviceMatch:\n # e.g. \"eth0\"\n devicePrefix = deviceMatch.group(1)\n deviceNumber = deviceMatch.group(2)\n deviceNumber = int(deviceNumber)\n for i in range(8, deviceNumber - 1, -1):\n deviceI = devicePrefix + str(i)\n deviceIPlus1 = devicePrefix + str(i + 1)\n # move up by one device each network configuration\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--device[ \\t]*(?:=|[ \\t])[ \\t]*)\" + re.escape(deviceI) + r\"(.*)$\",\n r\"\\g<1>\" + deviceIPlus1 + r\"\\g<2>\",\n commandSection.string)\n # not --noipv6\n networkConfiguration = \"network --device=\" + device + \" --bootproto=dhcp --onboot=yes --activate\"\n if deviceMatch and deviceNumber == 0:\n # having configuration of eth0 first appears to be more conducive to overall success,\n # and also, per http://fedoraproject.org/wiki/Anaconda/Kickstart#network, supposedly\n # \"... in installer environment. Device of the first network command is activated if network is required,\n # e.g. 
in case of network installation ...\",\n commandSection.string = networkConfiguration + \"\\n\" \\\n + \"#\\n\" \\\n + commandSection.string\n else:\n commandSection.string = commandSection.string \\\n + \"#\\n\" \\\n + networkConfiguration + \"\\n\"", "def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return", "def setup_dhcp6_config(self, board_config):\n raise NotImplementedError", "def setup_dhcp_env(device):\n raise NotImplementedError", "def set_accel_range(self, accel_range):\r\n # First change it to 0x00 to make sure we write the correct value later\r\n self.bus.write_byte_data(self.deviceaddress, self.accel_config, 0x00)\r\n\r\n # Write the new range to the ACCEL_CONFIG register\r\n self.bus.write_byte_data(self.deviceaddress, self.accel_config, accel_range)", "def dhcp_options_id(self, dhcp_options_id):\n self._dhcp_options_id = dhcp_options_id", "def str_to_range(lo, hi):\n x = rpki.ipaddrs.parse(lo)\n y = rpki.ipaddrs.parse(hi)\n assert type(x) == type(y)\n if isinstance(x, rpki.ipaddrs.v4addr):\n return rpki.resource_set.resource_range_ipv4(x, y)\n else:\n return rpki.resource_set.resource_range_ipv6(x, y)", "def update_cmts_isc_dhcp_config(self, board_config):\n self.setup_dhcp_config(board_config)\n self.setup_dhcp6_config(board_config)\n raise NotImplementedError", "def dhcp_lease_times(self):\n default_lease_time = input('enter a default lease time for dhcp\\n'\n 'default [800]: ')\n default = 800\n default_lease_time = set_values(default_lease_time, default, check='integer')\n max_lease_time = input('enter max lease time for dhcp\\n'\n 'default [7200]: ')\n default = 7200\n max_lease_time = set_values(max_lease_time, default, check='integer')\n logging.info('adding default_lease_time: {} max_lease_time: {}'.format(default_lease_time,\n max_lease_time))\n self.inventory_dict['csah']['vars']['default_lease_time'] = default_lease_time\n self.inventory_dict['csah']['vars']['max_lease_time'] = max_lease_time", "def renew_dhcp_lease(self):\n\t\tresponse = os.system(\"/sbin/dhclient -r;/sbin/dhclient\")\n\t\tif response != 0:\n\t\t\tprint \"Network restart failed. 
DHCP Lease failed.\"", "def set_accel_range(self, accel_range):\n\t\t# First change it to 0x00 to make sure we write the correct value later\n\t\tself.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)\n\n\t\t# Write the new range to the ACCEL_CONFIG register\n\t\tself.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)", "def change_adp(self, network: str):\r\n self.ip = network\r\n self.adp = self.ipv4_adp[network]\r\n self.mac = self.ipv4_mac[network].replace('-', ':')\r\n # print(self.adp, self.ip, self.mac)\r", "def set_configure_with_dhcp(self, bConfigureWithDhcp):\n\t\tcall_sdk_function('PrlVmDevNet_SetConfigureWithDhcp', self.handle, bConfigureWithDhcp)", "def dhcp_renew(ifname):\n\n logging.debug('Renewing %s DHCP lease...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--rebind', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def set_accel_range(self, accel_range):\n # First change it to 0x00 to make sure we write the correct value later\n self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)\n\n # Write the new range to the ACCEL_CONFIG register\n self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)", "def set_PWM_range(user_gpio, range_):\n return _u2i(_pigpio_command(_control, _PI_CMD_PRS, user_gpio, range_))", "def setIP( self, intf, ip, prefixLen=8 ):\n ipSub = '%s/%d' % ( ip, prefixLen )\n result = self.cmd( 'ifconfig', intf, ipSub, 'up' )\n self.ips[ intf ] = ip\n return result" ]
[ "0.6616823", "0.6526532", "0.64205295", "0.61709964", "0.5981741", "0.581122", "0.57029295", "0.56834716", "0.565777", "0.5584433", "0.54814816", "0.5453117", "0.5443394", "0.5424341", "0.5423709", "0.535401", "0.53214043", "0.5315264", "0.5313163", "0.53032523", "0.5302614", "0.5292723", "0.527363", "0.5224426", "0.5184081", "0.51797557", "0.5179698", "0.51784956", "0.51653194", "0.5139177" ]
0.82061505
0
Converts a single track record into m3u format. The normalization is needed to fix the way Apple handles e.g. combining diacriticals.
def to_m3u_track(record: Dict[str, str]) -> str:
    location = normalize(unquote(record.get("Location")))
    # m3u duration in seconds, not ms
    duration = int(record.get("Total Time")) // 1000
    name = normalize(unquote(record.get("Name")))
    artist = normalize(unquote(
        record.get("Artist") or record.get("Album Artist") or record.get("Composer", "")
    ))
    # print("Location {}".format(location))
    return M3U_TRACK_TEMPLATE.format(
        length=duration, artist=artist, title=name, path=location
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_m3u_list(list_name: str, tracks: List[str]) -> str:\n\n return M3U_TEMPLATE.format(name=list_name, tracks=\"\\n\".join(tracks))", "def encodeMP3(self, wavf: str, dstf: str, cover: str, meta: TrackMeta) -> None:\n FNULL = open(os.devnull, 'w')\n subprocess.call(['lame', '-V2', wavf, dstf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n # tag MP3\n mm = TrackMeta(meta)\n mp3 = MP3(dstf, ID3=ID3)\n mp3[\"TIT2\"] = TIT2(encoding=3, text=mm.title())\n mp3[\"TPE1\"] = TPE1(encoding=3, text=mm.artist())\n mp3[\"TALB\"] = TALB(encoding=3, text=mm.album())\n mp3[\"TPE2\"] = TPE2(encoding=3, text=mm.albumartist())\n if mm.date():\n mp3[\"TDRC\"] = TDRC(encoding=3, text=mm.date())\n mp3[\"TRCK\"] = TRCK(encoding=3,\n text=mm.tracknumber() + \"/\" + mm.tracktotal())\n mp3[\"TPOS\"] = TPOS(encoding=3,\n text=mm.discnumber() + \"/\" + mm.disctotal())\n\n # composer\n if mm.composer():\n mp3[\"TCM\"] = TCM(encoding=3, text=mm.composer())\n\n # cover\n if cover:\n data = open(cover, 'rb').read()\n if cover.endswith('png'):\n mime = 'image/png'\n else:\n mime = 'image/jpeg'\n mp3.tags.add(APIC(encoding=3, mime=mime, type=3, desc=u'Cover', data=data))\n\n # save\n mp3.save()", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def convert_AMUA3_gcm3(den_AMUA3):\n return den_AMUA3/const_avo*10.0", "def set_meta_mp3(file):\n\n list_str_prop_mp3 = ['album', 'artist', 'title']\n list_other_prop_mp3 = ['comment', 'genre', 'year']\n dict_file_mp3 = {}\n # For each string properties into the tag\n for prop in list_str_prop_mp3:\n # If the tag exist (i.e it's not empty for the music file)\n if file.tag.d.has_key(prop.upper()):\n # We delete spe char and we format it\n dict_file_mp3[prop] = delete_spe_char_and_format(file.tag[prop.upper()])\n else:\n # Or we define it's value as 'Unknow ' + prop\n # For instance 'Unknow Artist'\n dict_file_mp3[prop] = 'Unknow ' + prop.capitalize()\n # For each other properties\n for prop in list_other_prop_mp3:\n if file.tag.d.has_key(prop.upper()):\n # We just copy them\n dict_file_mp3[prop] = file.tag[prop.upper()]\n else:\n dict_file_mp3[prop] = ''\n # To try to find the tracknumber, we need 'title'\n if dict_file_mp3.has_key('title'): \n # But before, we delete the duplicate\n list_duplicate = [dict_file_mp3['artist'], dict_file_mp3['album']]\n # Now we delete the duplicates\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], list_duplicate)\n # So we are able to find the tracknumber\n number = ''\n # If ID3 already find it\n if file.tag.d.has_key(\"TRACKNUMBER\"):\n number = file.tag[\"TRACKNUMBER\"]\n # Else we try to find by ourself\n else:\n number = find_tracknumber(dict_file_mp3['title'])\n # If we found a tracknumber, we delete it from 'title'\n if number:\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], [number])\n dict_file_mp3['tracknumber'] = number\n # And we format the new title\n dict_file_mp3['title'] = build_track_name(dict_file_mp3['title'], number)\n dict_file_mp3['name'] = dict_file_mp3['title'] + '.mp3'\n dict_file_mp3['path'] = build_path([dict_file_mp3['artist'], dict_file_mp3['album']])\n return dict_file_mp3", "def convert_gcm3_AMUA3(den_gmc3):\n return den_gmc3*const_avo/10.0", "def test_load_mp3_file(self):\n track 
= Track.from_filename(self.track_path('silence.mp3'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.ensemble, 'Group')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def repr2to3 (v):\n if isinstance(v, six.string_types):\n qu = QuotedEscaped(v)\n if 'u' == qu[0]:\n return qu[1:]\n return qu\n if isinstance(v, six.integer_types):\n vs = repr(v)\n if vs.endswith('L'):\n return vs[:-1]\n return vs\n return repr(v)", "def m3u() -> Response:\n m3uText = \"#EXTM3U\\n\"\n for station in locast_service.get_stations():\n callsign = name_only(station.get(\"callSign_remapped\") or station.get(\n \"callSign\") or station.get(\"name\"))\n city = station[\"city\"]\n logo = station.get(\"logoUrl\") or station.get(\"logo226Url\")\n channel = station.get(\"channel_remapped\") or station[\"channel\"]\n networks = \"Network\" if callsign in [\n 'ABC', 'CBS', 'NBC', 'FOX', 'CW', 'PBS'] else \"\"\n groups = \";\".join(filter(None, [city, networks]))\n url = f\"http://{host_and_port}/watch/{station['id']}.m3u\"\n\n tvg_name = f\"{callsign} ({city})\" if config.multiplex else callsign\n\n m3uText += f'#EXTINF:-1 tvg-id=\"channel.{station[\"id\"]}\" tvg-name=\"{tvg_name}\" tvg-logo=\"{logo}\" tvg-chno=\"{channel}\" group-title=\"{groups}\", {callsign}'\n\n if config.multiplex:\n m3uText += f' ({city})'\n m3uText += f'\\n{url}\\n\\n'\n return m3uText", "def _convert_meta(m):\n # Decode Pascal style string with 4 bytes length field\n l = struct.unpack(\"<I\", m[:4])[0]\n return m[4:4+l]", "def parse_m3u8_line(line):\r\n # get a dictionary of attributes from line\r\n # examples:\r\n # {'TYPE': 'AUDIO', 'GROUP-ID': '160000mp4a.40.2', 'LANGUAGE': 'eng', 'NAME': 'eng'}\r\n # {'BANDWIDTH': '233728', 'AVERAGE-BANDWIDTH': '233728', 'RESOLUTION': '320x180', 'FRAME-RATE': '25.000', 'VIDEO-RANGE': 'SDR', 'CODECS': 'avc1.42C015,mp4a.40.2', 'AUDIO': '64000mp4a.40.2'}\r\n\r\n info = {}\r\n for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>\"[^\"]+\"|[^\",]+)(?:,|$)', line):\r\n if val.startswith('\"'):\r\n val = val[1:-1]\r\n info[key] = val\r\n return info", "def find_artist_playlist(data):\n\n return data['artist'].lower() + '.m3u'", "def create_m3u_file(\n file_name: str,\n song_list: List[Song],\n template: str,\n file_extension: str,\n short: bool = False,\n) -> str:\n\n m3u_content = create_m3u_content(song_list, template, file_extension, short)\n\n with open(file_name, \"w\", encoding=\"utf-8\") as m3u_file:\n m3u_file.write(m3u_content)\n\n return m3u_content", "def export_to_ascii(self):\n t3 = self.data.t3\n # get wavelength data\n wav = self.data.wavelength[\"WAVELENGTH_NAME\"].eff_wave[0]\n\n # output u1, v1, u2, v2, u3, v3, t3amp, t3phi, t3err\n t3data = [\n [\n t3[i].u1coord / wav,\n t3[i].v1coord / wav,\n t3[i].u2coord / wav,\n t3[i].v2coord / wav,\n -(t3[i].u1coord + t3[i].u2coord) / wav,\n -(t3[i].v1coord + t3[i].v2coord) / wav,\n t3[i].t3amp[0],\n t3[i].t3phi[0],\n t3[i].t3amperr[0],\n t3[i].t3phierr[0],\n ]\n for i in range(len(t3))\n ]\n\n self.t3data = np.array(t3data)\n return self.t3data", "def get_song(track):\n # Extract some identifying track information\n Title = track['name'].encode('utf-8')\n title = normalize(Title)\n Artist = [a['name'].encode('utf-8') for a in track['artists']]\n Album = track['name'].encode('utf-8')\n Popularity = track['popularity']\n # 
Put information into a namedTuple for convenience\n song = Track(Title, Artist, Album, Popularity, title)\n return song", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def find_album_playlist(data):\n\n return data['album'].lower() + '.m3u'", "def I3_u1(self) -> complex:\n # Should match in amplitude with \"Phase 3 current [A]\"\n # Convert from relative to U32 -> relative to U1\n return - (self.I3_u32() * cmath.rect(1, 90 / 180 * cmath.pi))", "def convert_3to1(seq):\n term_list = []\n for i in __kmers(seq,k=3):\n res = __get_value(i,aa3_to1_dict)\n term_list.append(res)\n return ''.join(term_list)", "def get_mp3_tags(mp3):\n _info = mutagen.File(mp3)\n a = _info.get('TPE1', ('UnknownArtist', ''))[0] # artist\n t = _info.get('TIT2', ('UnknownTitle', ''))[0] # title\n r = _info.get('TRCK', ('UnknownTrack', ''))[0] # track\n y = str(_info.get('TDRC', ('UnknownYear', ''))[0]) # year\n g = _info.get('TCON', ('UnknownGenre', ''))[0] # genre\n b = _info.get('TALB', ('UnknownAlbum', ''))[0] # album\n return a, t, r, y, g, b", "def notes2trk(notes):\n\n trk = MidiTrack()\n\n for i, note in enumerate(notes):\n if note[0] == 's': # Message for a silence\n trk.append(Message(\"note_on\", note=0, velocity=0, time=0))\n trk.append(Message(\"note_off\", note=0, velocity=0, time=note[1]))\n continue\n\n nlist = note[0].split(' ')\n\n for c_n in nlist: # Add the note on message\n n_nb, v_on, v_off = char2note(c_n)\n if len(note)==2:\n trk.append(Message(\"note_on\", note=n_nb, velocity=v_on, time=0))\n else:\n trk.append(Message(\"note_on\", note=n_nb, velocity=note[2],\n time=0))\n if len(note) == 4:\n trk.append(Message(\"note_off\", note=n_nb, velocity=v_off,\n time=note[1]))\n else:\n trk.append(Message(\"note_off\", note=n_nb, velocity=v_off,\n time=note[1]))\n for c_n in nlist[:-1]: # Add the note off message\n n_nb, v_on, v_off = char2note(c_n)\n if len(note) == 4: # For short note\n trk.append(Message(\"note_off\", note=n_nb, velocity=note[3],\n time=0))\n else:\n trk.append(Message(\"note_off\", note=n_nb, velocity=v_off,\n time=0))\n\n return trk", "def encode(record: Tuple[MeasureInput, MeasureResult]) -> str:\n return dump_record_to_string(*record)", "def convert_track(track_path):\n track_name, track_extension = os.path.splitext(track_path)\n converted_track = None\n if track_extension != \"\":\n track_name += \".ogg\"\n converted_track = AudioSegment.from_file(track_path,\n format=track_extension[1:])\n converted_track.export(track_name, format=\"ogg\")\n return converted_track, track_name", "def _to_cn(number):\n\n chinese_numeral_dict = {\n '0': '零',\n '1': '一',\n '2': '二',\n '3': '三',\n '4': '四',\n '5': '五',\n '6': '六',\n '7': '七',\n '8': '八',\n '9': '九'\n }\n chinese_unit_map = [('', '十', '百', '千'),\n ('万', '十万', '百万', '千万'),\n ('亿', '十亿', '百亿', '千亿'),\n ('兆', '十兆', '百兆', '千兆'),\n ('吉', '十吉', '百吉', '千吉')]\n chinese_unit_sep = ['万', '亿', '兆', '吉']\n\n reversed_n_string = reversed(str(number))\n\n result_lst = []\n unit = 0\n\n for integer in reversed_n_string:\n if integer is not '0':\n result_lst.append(chinese_unit_map[unit // 4][unit % 4])\n 
result_lst.append(chinese_numeral_dict[integer])\n unit += 1\n else:\n if result_lst and result_lst[-1] != '零':\n result_lst.append('零')\n unit += 1\n\n result_lst.reverse()\n\n # clean convert result, make it more natural\n if result_lst[-1] is '零':\n result_lst.pop()\n\n result_lst = list(''.join(result_lst))\n\n for unit_sep in chinese_unit_sep:\n flag = result_lst.count(unit_sep)\n while flag > 1:\n result_lst.pop(result_lst.index(unit_sep))\n flag -= 1\n\n '''\n length = len(str(number))\n if 4 < length <= 8:\n flag = result_lst.count('万')\n while flag > 1:\n result_lst.pop(result_lst.index('万'))\n flag -= 1\n elif 8 < length <= 12:\n flag = result_lst.count('亿')\n while flag > 1:\n result_lst.pop(result_lst.index('亿'))\n flag -= 1\n elif 12 < length <= 16:\n flag = result_lst.count('兆')\n while flag > 1:\n result_lst.pop(result_lst.index('兆'))\n flag -= 1\n elif 16 < length <= 20:\n flag = result_lst.count('吉')\n while flag > 1:\n result_lst.pop(result_lst.index('吉'))\n flag -= 1\n '''\n\n return ''.join(result_lst)", "def test_transform_track_title_based_on_artist_album_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_title_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_load_mp3_file_total_tracks(self):\n track = Track.from_filename(self.track_path('silence-totalnum.mp3'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def __n3_to_str(triple):\n s, p, o = triple\n s = s.n3()\n p = p.n3()\n o = o.n3()\n if s.startswith('<') and s.endswith('>'):\n s = s[1:len(s) - 1]\n if p.startswith('<') and p.endswith('>'):\n p = p[1:len(p) - 1]\n if o.startswith('<') and o.endswith('>'):\n o = o[1:len(o) - 1]\n return (s, p, o)", "def test_transform_track_title_based_on_artist_album_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n 
self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)" ]
[ "0.62157995", "0.5663488", "0.5658642", "0.5651403", "0.56042594", "0.55793566", "0.5382664", "0.53761894", "0.5295393", "0.52780515", "0.52740747", "0.517685", "0.5173452", "0.51395833", "0.50221336", "0.5019546", "0.501397", "0.50028616", "0.49967933", "0.49772906", "0.49313846", "0.49195954", "0.49189064", "0.49075663", "0.48895612", "0.48844042", "0.4874485", "0.4868547", "0.48620832", "0.48281404" ]
0.75607246
0
Converts a list of serialized m3u tracks into a playlist.
def to_m3u_list(list_name: str, tracks: List[str]) -> str:
    return M3U_TEMPLATE.format(name=list_name, tracks="\n".join(tracks))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def playlist(self):\n def iconv(s):\n encoding = self.options[\"id3_encoding\"]\n try:\n if encoding:\n return s.encode('latin1').decode(encoding).encode('utf-8')\n else:\n return s.encode('latin1')\n except UnicodeEncodeError:\n return \"\"\n\n lst = []\n r = self.x.playlist_list_entries()\n r.wait()\n for id in r.get_list():\n r = self.x.medialib_get_info(id)\n r.wait()\n if r.iserror():\n print r.get_error()\n lst.append(' ')\n continue\n song = r.get_propdict()\n try:\n artist = iconv(song[('plugin/id3v2', 'artist')])\n except KeyError:\n try:\n artist = iconv(song[('plugin/mad', 'artist')])\n except KeyError:\n artist = ''\n try:\n title = iconv(song[('plugin/id3v2', 'title')])\n except KeyError:\n try:\n title = iconv(song[('plugin/mad', 'title')])\n except KeyError:\n title = ''\n if artist == \"\" and title == \"\":\n name = os.path.split(song[('server', 'url')])[1]\n name = os.path.splitext(name)[0]\n name = urllib.unquote(name.decode('utf-8').encode('latin1'))\n name = name.replace(\"+\", \" \")\n lst.append(' ' + name)\n else:\n lst.append(' %s - %s' % (artist.ljust(6), title))\n\n return lst", "def parse_tracks(self, tracks_json: list):\n tracks = []\n for track in tracks_json:\n track_parsed = {\n 'id': track['id'],\n 'name': track['name'],\n }\n track_parsed['description'] = self.parse_description(track)\n track_parsed['image'] = track['album']['images'][0]['url']\n tracks.append(track_parsed)\n\n return tracks", "def save_playlist_tracks(playlist):\n results = sp.playlist_tracks(playlist)\n playlist_tracks = []\n\n while results['next']:\n for i in results['items']:\n track = i['track']\n playlist_tracks.append(track['id'])\n results = sp.next(results)\n\n file = save_to_json(playlist_tracks, f\"playlist_{playlist}.json\")\n return file", "def add_playlist_tracks(self, username, playlist_name, track_list):\n playlist_id = self.get_playlist_id(username, playlist_name)\n request_chunks = [track_list[i:i + 100] for i in range(0, len(track_list), 100)] # Blocks of 100 songs\n for track_chunk in request_chunks:\n self.spotify.user_playlist_add_tracks(username, playlist_id, track_chunk)", "def add_to_playlist(file, list, data = None):\n\n if not list:\n return\n\n exists = os.path.isfile(list)\n playlist = open(list, 'a')\n if not exists:\n playlist.write(\"#EXTM3U\\n\")\n\n if data:\n metadata = u\"#EXTINF: {}, {} - {} \\n\".format(data['time'], data['artist'], data['title'])\n playlist.write(metadata.encode('utf8'))\n\n playlist.write(file + \"\\n\")\n playlist.close()\n try:\n print 'Added to {}'.format(os.path.basename(list))\n except:\n pass", "def playlist_tracks_replace(self, playlist_id: str, track_ids: list):\n payload = {'uris': [to_uri('track', t) for t in track_ids]}\n return self._put(f'playlists/{playlist_id}/tracks)', payload=payload)", "def get_playlist_tracks(user, playlist_id, limit=100):\n info_dict = spotify.user_playlist_tracks(user, playlist_id, limit=limit)\n items = info_dict[\"items\"]\n tracks = []\n for i in range(len(items)):\n album_name = items[i][\"track\"][\"album\"][\"name\"]\n album_type = items[i][\"track\"][\"album\"][\"album_type\"]\n artists_names = ', '.join([\n items[i][\"track\"][\"artists\"][index][\"name\"]\n for index in range(len(items[i][\"track\"][\"artists\"]))\n ])\n track_name = items[i][\"track\"][\"name\"]\n popularity = items[i][\"track\"][\"popularity\"]\n track_id = items[i][\"track\"][\"id\"]\n tracks.append({\"Album Name\": album_name,\n \"Album Type\": album_type,\n \"Artist(s)\": artists_names,\n \"Track Name\": 
track_name,\n \"Popularity\": popularity,\n \"Track ID\": track_id\n })\n tracks.sort(key=lambda d: d['Popularity'], reverse=True)\n return tracks", "def spotify_playlist_as_json_tracks(playlist_id: int, access_token: str) -> list:\n query_url = \"https://api.spotify.com/v1/playlists/{}/tracks\".format(playlist_id)\n query_headers = {\"Authorization\": \"Bearer {}\".format(access_token)}\n # Get playlist tracks\n tracks_response = requests.get(query_url, headers=query_headers)\n if tracks_response.status_code != 200:\n return tracks_response.reason\n tracks_json = tracks_response.json()\n if \"error_description\" in tracks_json:\n return []\n # Get list of tracks\n tracks = []\n while \"next\" in tracks_json and tracks_json[\"next\"] is not None:\n for t in tracks_json[\"items\"]:\n tracks.append(t[\"track\"])\n tracks_json = requests.get(tracks_json[\"next\"], headers=query_headers).json()\n return tracks", "def get_playlist_tracks(playlist_id):\n\n results = spotifyObject.playlist_tracks(playlist_id)\n tracks = results['items']\n while results['next']:\n results = spotifyObject.next(results)\n tracks.extend(results['items'])\n return tracks", "def get_album_tracks(self):\n track_list = self.soup.findAll('div', class_='chart_row')\n number_of_tracks = 0\n titles = []\n urls = []\n track_numbers = []\n \n for track in track_list:\n track_title = re.sub(' Lyrics', '', \" \".join(track.h3.text.split()))\n lyrics_url = track.a['href']\n track_number = track.span.span.text.strip()\n \n if track_number == '':\n # Sometimes there are additional urls that are not a song's lyrics. Skip these.\n continue\n else:\n track_number = int(track_number)\n \n number_of_tracks += 1\n titles.append(track_title)\n urls.append(lyrics_url)\n track_numbers.append(track_number)\n \n if self.song_order:\n # Check that order values are okay.\n for number in self.song_order:\n if number > number_of_tracks:\n raise SongOrderValueError(f'Track number given ({number}) exceeds number of tracks ({number_of_tracks})')\n \n for title, url, number in zip(titles, urls, track_numbers):\n if self.song_order:\n if number not in self.song_order:\n print(f'Skipping song: {number:02d} {title}')\n continue\n \n lyrics = self.get_single_lyrics(url)\n self.album.add_song(Song(title=title, track_number=number, lyrics=lyrics))\n\n self.album.number_of_tracks = number_of_tracks", "def download_tracks(client, tracks, num_tracks=sys.maxsize, downloadable=False,\n folders=False, custom_path='', id3_extras={}):\n\n filenames = []\n\n for i, track in enumerate(tracks):\n\n # \"Track\" and \"Resource\" objects are actually different,\n # even though they're the same.\n if isinstance(track, soundcloud.resource.Resource):\n\n try:\n t_track = {}\n t_track['downloadable'] = track.downloadable\n t_track['streamable'] = track.streamable\n t_track['title'] = track.title\n t_track['user'] = {'username': track.user['username']}\n t_track['release_year'] = track.release\n t_track['genre'] = track.genre\n t_track['artwork_url'] = track.artwork_url\n if track.downloadable:\n t_track['stream_url'] = track.download_url\n else:\n if downloadable:\n puts_safe(colored.red(\"Skipping\") + colored.white(\": \" + track.title))\n continue\n if hasattr(track, 'stream_url'):\n t_track['stream_url'] = track.stream_url\n\n track = t_track\n except Exception as e:\n puts_safe(colored.white(track.title) + colored.red(' is not downloadable.'))\n continue\n\n if i > num_tracks - 1:\n continue\n try:\n if not track.get('stream_url', False):\n 
puts_safe(colored.white(track['title']) + colored.red(' is not downloadable.'))\n continue\n else:\n track_artist = sanitize_filename(track['user']['username'])\n track_title = sanitize_filename(track['title'])\n track_filename = track_artist + ' - ' + track_title + '.mp3'\n\n if folders:\n track_artist_path = join(custom_path, track_artist)\n if not exists(track_artist_path):\n mkdir(track_artist_path)\n track_filename = join(track_artist_path, track_filename)\n else:\n track_filename = join(custom_path, track_filename)\n\n if exists(track_filename):\n puts_safe(colored.yellow(\"Track already downloaded: \") + colored.white(track_title))\n continue\n\n puts_safe(colored.green(\"Downloading\") + colored.white(\": \" + track['title']))\n\n if track.get('direct', False):\n location = track['stream_url']\n else:\n stream = client.get(track['stream_url'], allow_redirects=False, limit=200)\n if hasattr(stream, 'location'):\n location = stream.location\n else:\n location = stream.url\n\n filename = download_file(location, track_filename)\n tagged = tag_file(filename,\n artist=track['user']['username'],\n title=track['title'],\n year=track['release_year'],\n genre=track['genre'],\n album=id3_extras.get('album', None),\n artwork_url=track['artwork_url'])\n\n if not tagged:\n wav_filename = filename[:-3] + 'wav'\n os.rename(filename, wav_filename)\n filename = wav_filename\n\n filenames.append(filename)\n except Exception as e:\n puts_safe(colored.red(\"Problem downloading \") + colored.white(track['title']))\n puts_safe(str(e))\n\n return filenames", "def add_tracks_to_playlist(self, track_ids):\n endpoint = f\"playlists/{self.playlist_id}/tracks\"\n self.spotify_client._headers[\"Content-Type\"] = \"application/json\"\n self.spotify_client._data = json.dumps(\n [f\"spotify:track:{track_id}\" for track_id in track_ids]\n )\n response = self.spotify_client._post_api_data(endpoint)\n return response", "def add_from_playlist(self, params):\n lists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for l in lists:\n if 'playlist:' + l in n:\n names.append(n)\n\n self.add_playlist(names)", "def get_playlist_tracks_adapter(json_response):\n\n ret = {\"result\": []}\n for item in json_response['items']:\n ret[\"result\"].append(json_to_track_info(item[\"track\"]))\n return ret", "async def async_parse_m3u_url(self, playlist):\n try:\n websession = async_get_clientsession(self.hass)\n async with async_timeout.timeout(10):\n response = await websession.get(playlist)\n\n except (asyncio.TimeoutError, aiohttp.ClientError) as error:\n _LOGGER.warning(\n \"For: %s unable to get the M3U playlist: %s\", self._name, playlist\n )\n return playlist\n\n if response.status == HTTPStatus.OK:\n data = await response.text()\n _LOGGER.debug(\"For: %s M3U playlist: %s contents: %s\", self._name, playlist, data)\n\n lines = [line.strip(\"\\n\\r\") for line in data.split(\"\\n\") if line.strip(\"\\n\\r\") != \"\"]\n if len(lines) > 0:\n _LOGGER.debug(\"For: %s M3U playlist: %s lines: %s\", self._name, playlist, lines)\n urls = [u for u in lines if u.startswith('http')]\n _LOGGER.debug(\"For: %s M3U playlist: %s urls: %s\", self._name, playlist, urls)\n if len(urls) > 0:\n return urls[0]\n else:\n _LOGGER.error(\"For: %s M3U playlist: %s No valid http URL in the playlist!!!\", self._name, playlist)\n self._nometa = True\n else:\n _LOGGER.error(\"For: %s M3U playlist: %s No content to parse!!!\", self._name, playlist)\n\n else:\n _LOGGER.error(\n \"For: %s (%s) Get failed, response code: %s Full 
message: %s\",\n self._name,\n self._host,\n response.status,\n response,\n )\n\n return playlist", "def import_tracks_from_test_json(path, l, user):\n with open(path, \"rb\") as file:\n tracks = json.load(file)\n for track in tracks:\n if l is not None:\n l(track)\n\n serializer = MusicTrackSerializerW(data=track)\n serializer.initial_data[\"user\"] = user.pk\n serializer.is_valid(raise_exception=True)\n serializer.save()", "def get_playlist_tracks(playlist):\n track_ids = [id for id in load_from_json(f\"playlist_{playlist}.json\") if id is not None]\n tracks = []\n\n for i in range(0, len(track_ids), 50):\n tracks_info = sp.tracks(track_ids[i: i+50])['tracks']\n for track in tracks_info:\n if track:\n tracks.append({\n 'id': track['id'],\n 'name': track['name'],\n 'popularity': track['popularity']\n })\n df = pd.DataFrame(tracks)\n\n file = f\"playlist_{playlist}_df.csv\"\n df.to_csv(file)\n\n return file", "def test_get_pl_tracks(self):\n\n # Playlist 1\n result1 = self.client.get(\"playlist/pl1\")\n self.assertEqual(result1.status_code, 200)\n self.assertIn(b\"Track 1\", result1.data)\n self.assertIn(b\"Track 3\", result1.data)\n self.assertNotIn(b\"Track 5\", result1.data)\n\n # Playlist 2\n result2 = self.client.get(\"playlist/pl2\")\n self.assertEqual(result2.status_code, 200)\n self.assertIn(b\"Track 4\", result2.data)\n self.assertIn(b\"Track 5\", result2.data)\n self.assertNotIn(b\"Track 1\", result2.data)", "def add_tracks_to_spotify_playlist(\n tracks: list, playlist_spotify_id: str, access_token: str\n) -> Optional[str]:\n headers = {\n \"Authorization\": \"Bearer {}\".format(access_token),\n \"Content-Type\": \"application/json\",\n }\n # Add tracks 100 at a time per Spotify API docs\n for i in range(0, len(tracks), 100):\n last = min(i + 100, len(tracks))\n uris = []\n for t in tracks[i:last]:\n if t.spotify_id:\n uris.append(\"spotify:track:{}\".format(t.spotify_id))\n elif match_track_spotify(t, access_token):\n uris.append(\"spotify:track:{}\".format(t.spotify_id))\n response = requests.post(\n \"https://api.spotify.com/v1/playlists/{}/tracks\".format(\n playlist_spotify_id\n ),\n headers=headers,\n json={\"uris\": uris},\n )\n if response.status_code != 200 and response.status_code != 201:\n return \"Error: {}\".format(response.text)\n if last == len(tracks):\n break\n return None", "def playlist_items(self):\r\n return v3.PlaylistItems(self)", "def playlist_tracks(self, playlist_id: str, fields: str = None,\n market: str = 'from_token', limit: int = 100,\n offset: int = 0):\n return self._get(f'playlists/{playlist_id}/tracks', limit=limit,\n offset=offset, fields=fields, market=market)", "def convert_playlist_to_v2():\n # skip if previously done\n if os.path.isfile(g.PLFILE):\n return\n\n # skip if no playlist files exist\n elif not os.path.isfile(g.OLD_PLFILE):\n return\n\n try:\n with open(g.OLD_PLFILE, \"rb\") as plf:\n old_playlists = pickle.load(plf)\n\n except IOError:\n sys.exit(\"Couldn't open old playlist file\")\n\n # rename old playlist file\n backup = g.OLD_PLFILE + \"_v1_backup\"\n\n if os.path.isfile(backup):\n sys.exit(\"Error, backup exists but new playlist exists not!\")\n\n os.rename(g.OLD_PLFILE, backup)\n\n # do the conversion\n for plname, plitem in old_playlists.items():\n\n songs = []\n\n for video in plitem.songs:\n v = Video(video['link'], video['title'], video['duration'])\n songs.append(v)\n\n g.userpl[plname] = Playlist(plname, songs)\n\n # save as v2\n save_to_file()", "def create_playlist(self, data):\n pass", "def 
test_load_mp3_file(self):\n track = Track.from_filename(self.track_path('silence.mp3'))\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Track')\n self.assertEqual(track.ensemble, 'Group')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.tracknum, 1)\n self.assertEqual(track.seconds, 2.0)", "def user_playlist_replace_tracks(self, playlist_id, tracks, **kwargs):\n _id = self._get_playlist_id(playlist_id)\n # pylint: disable=no-member\n url = API.PLAYLIST_TRACKS.value.format(playlist_id=_id)\n first_100_tracks, rest_tracks = tracks[:100], tracks[100:]\n track_uris = list(map(self._get_track_uri, first_100_tracks))\n replaced = self._put(url, payload={\"uris\": track_uris}, **kwargs)\n if not rest_tracks:\n return replaced\n\n added = self.user_playlist_add_tracks(playlist_id, rest_tracks)\n if isinstance(added, list):\n return [replaced, *added]\n\n return [replaced, added]", "def get_item_tracks(item):\n tracks = []\n # how many tracks to load at the same time (can'collection_type do all at once because\n # of spotify API's limitations)\n batch_size = 50\n\n if item['type'] == 'playlist':\n api_func = SP.playlist_tracks\n elif item['type'] == 'album':\n api_func = SP.album_tracks\n\n # keep track of the index of the last batch\n offset = 0\n while True:\n # get one batch of tracks per iteration\n new_tracks = api_func(item['id'], limit=batch_size, offset=offset)\n new_tracks = new_tracks['items']\n\n # the 'playlist tracks' function hides the tracks one layer deeper\n if item['type'] == 'playlist':\n new_tracks = [collection_type['track'] for collection_type in new_tracks]\n\n # stop if no tracks are found at this offset\n if len(new_tracks) == 0:\n break\n\n tracks += new_tracks\n offset += batch_size\n\n return tracks", "def parse_tracks(tracks):\n accumulated_time = args.start_time\n for track in csv.reader(args.track_list, delimiter='\\t'):\n try:\n name, track_time, performer = parse_track_string(track)\n\n if args.timestamp:\n yield (track_time, name, performer)\n else:\n yield (accumulated_time, name, performer)\n accumulated_time += track_time\n except ValueError as v:\n logger.error(v)\n\n # The dummy track is required to make mp3splt split the last track.\n if args.dummy:\n if args.timestamp:\n yield (args.end_time, \"Dummy track\", args.performer)\n else:\n yield (accumulated_time, \"Dummy track\", args.performer)", "def playlistid(self, track_id=None):\n track_id = '' if track_id is None else track_id\n lines = yield from self.command('playlistid {}'.format(track_id))\n return parse_playlist(lines)", "def post_get_playlist(result, **kw):\n if result and 'tracks' in result:\n for playlist_track in result['tracks']:\n track = Track.query.filter_by(id=playlist_track['track_id']).first()\n if track:\n playlist_track['track'] = track.as_dict()", "def get_playlist_songs(self, playlist_id):\n values = {'action' : 'playlist_songs',\n 'filter' : playlist_id,\n }\n root = self.__call_api(values)\n songs = root.getElementsByTagName('song')\n if not songs:\n return None\n l= []\n try:\n for song in songs:\n song_id = int(song.getAttribute('id'))\n song_title = song.getElementsByTagName('title')[0].childNodes[0].data\n artist_id = int(song.getElementsByTagName('artist')[0].getAttribute('id'))\n artist_name = song.getElementsByTagName('artist')[0].childNodes[0].data\n album_id = int(song.getElementsByTagName('album')[0].getAttribute('id'))\n album_name = 
song.getElementsByTagName('album')[0].childNodes[0].data\n\n song_track = int(song.getElementsByTagName('track')[0].childNodes[0].data)\n song_time = int(song.getElementsByTagName('time')[0].childNodes[0].data)\n song_size = int(song.getElementsByTagName('size')[0].childNodes[0].data)\n\n try: # New Ampache puts nothing here...\n precise_rating = int(song.getElementsByTagName('preciserating')[0].childNodes[0].data)\n except:\n precise_rating = 0\n try:\n rating = float(song.getElementsByTagName('rating')[0].childNodes[0].data)\n except:\n rating = 0\n art = song.getElementsByTagName('art')[0].childNodes[0].data\n url = song.getElementsByTagName('url')[0].childNodes[0].data\n song_dict = {\n 'song_id' : song_id,\n 'song_title' : song_title,\n 'artist_id' : artist_id,\n 'artist_name' : artist_name,\n 'album_id' : album_id,\n 'album_name' : album_name,\n 'song_track' : song_track,\n 'song_time' : song_time,\n 'song_size' : song_size,\n 'precise_rating' : precise_rating,\n 'rating' : rating,\n 'art' : art,\n 'url' : url,\n }\n l.append(song_dict)\n except:\n print(\"This playlist failed\", playlist_id)\n traceback.print_exc()\n return None\n return l" ]
[ "0.6630314", "0.63010496", "0.6148589", "0.59600264", "0.59457934", "0.5888346", "0.5799984", "0.5784354", "0.5780846", "0.5779549", "0.577538", "0.5754062", "0.5745098", "0.57270885", "0.56991816", "0.5638412", "0.5616215", "0.56141627", "0.55851924", "0.55660707", "0.55420333", "0.5539387", "0.5524038", "0.5498065", "0.5492273", "0.54910463", "0.5476722", "0.5475222", "0.54607415", "0.546074" ]
0.7024213
0
get the value of property _Chassis
def Chassis(self):
    return self._Chassis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCharger(self):\r\n if hasattr(self, \"charger\"):\r\n return self.charger\r\n else:\r\n return None", "def value(self):\r\n return self.__cargo", "def test_get_chassis(self):\n resp = self.chassis_client.get_chassis(self.chassis.uuid)\n self.assertEqual(resp.status_code, 200)\n chassis = resp.entity\n self.assertEqual(chassis.description, self.chassis_description)\n self.assertEqual(chassis.extra, self.chassis_extra)", "def value(self):\n return self.__cargo", "def list_chassis(self):\n return self.ironic_client.chassis.list()", "def get_chassis_type(device):\n\n try:\n out = device.parse('show version')\n except SubCommandFailure:\n log.info('Could not get device version information')\n return None\n\n return out.q.get_values('chassis', 0)", "def get_chassis_sn(device):\n\n try:\n out = device.parse('show version')\n except SubCommandFailure:\n log.info('Could not get device version information')\n return None\n\n return out.q.get_values('chassis_sn', 0)", "def _value(self):\n return self.device.value(*self._id[1:])", "def get_device_property(self, client, prop):\r\n value = client.getDeviceProperty(prop)\r\n return value", "def DoIt(self, host, vm, variable):\n\n vm = Operation.GetVm(host, vm)\n\n variableComponents = variable.split('.', 1)\n device = vm.GetDevice(variableComponents[0])\n if device:\n if len(variableComponents) > 1:\n return rec_getattr(device, variableComponents[1])\n else:\n return device\n\n\n value = vm.GetExtraConfig().get(variable, None)\n if value: return value\n\n return rec_getattr(vm, self.GetVmodlProperty(variable))", "def car(self):\n return self.pair.car", "def value(self, channel):\n if channel == 1:\n value = self.gas_turbo\n if channel == 2:\n value = self.gas_system_wrg\n if channel == 3:\n value = self.mass_spectrometer\n if channel == 4:\n value = self.gas_system_baratron\n return value", "def Get(self):\n return self.Bus.Read_uInt16(self.Address,0x68+self.Pin)", "def device(self):\n return self._vars[0].device", "def getMotor(self):\n return self._l[3]", "def get_value(self, device_name):\n return epics.caget(str(device_name))", "def getData(self):\n\t\t\treturn str(self.car)", "def value(self):\n return self.piece_behavior.value", "def get_property(self, client):\r\n client.getProperty()", "def prop(self):\n return getattr(self, name)", "def getPref(self):\n return col.BusDAO.FindByIndex(self.Scanbus)", "def test_ucs_get_chassis(self):\n api_data = request(\"get\", \"/chassis\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n self.assertGreater(len(api_data[\"json\"]), 0, \"Zero chassis elements found\")\n # TO DO more in depth testing for the returned content such as mac validation, etc...", "def value(self) -> Optional[Any]:\n return self.get(\"/V\")", "def get_battery(self) -> int:\r\n return self.state['bat']", "def get_value(self):", "def getBattery(self):\n raise NotImplementedError", "def get_value(self):\n pass", "def value(self):\n return super(Robot, self).value", "def __getattr__(self, attr):\n return self.product.get(attr, \"\")", "def getvalue(self):\n ..." ]
[ "0.66945535", "0.62844557", "0.62731075", "0.6223003", "0.6214095", "0.59638584", "0.5955606", "0.59384584", "0.5914062", "0.5908595", "0.59032404", "0.5835006", "0.5728211", "0.5713444", "0.5685319", "0.5671768", "0.55380136", "0.55307096", "0.5527974", "0.5512052", "0.55114603", "0.5506869", "0.54999053", "0.5494482", "0.5492441", "0.5481866", "0.5459408", "0.5454482", "0.5452919", "0.54376453" ]
0.7827342
0
get the value of property _Option
def Option(self): return self._Option
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_option(self, option):\n\t\treturn self.options[option]", "def get(self, option):\n return get(self.name, option)", "def get_option_value(self, key):\n\n # Check the key.\n self.__assert_option(key)\n\n # Get and return the value.\n return self.__opt[key]", "def OptionValue(self):\n if self.force_auto_sync:\n self.get('OptionValue')\n return self._OptionValue", "def opt_value(self):\n return self._opt_value", "def get_option(self, key):\n return self.options[key]", "def get_option(self, option):\n if not self._options.has_key(option):\n raise KeyError, \"Invalid option: \" + option\n else:\n return self._options.get(option)", "def get_value(self):\n return self._get_value(self.optionType, self.value, self.defaultValue)", "def __get_option(self, option):\n if option in Config.OPTIONS.keys():\n _default = Config.OPTIONS[option]\n elif option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n elif option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n else:\n _default = None # XXX ??\n \n _val = self.__get(option)\n\n if _val: \n return _val\n else:\n return _default", "def getOption(self, *args):\n return _libsbml.ConversionProperties_getOption(self, *args)", "def get_option_value(self, iprop, key):\n val = _pychidg.f90wrap_get_option_value(self=self._handle, iprop=iprop, key=key)\n return val", "def get_value(self, name, option, presentation=False):\r\n if name in self.values:\r\n value = self.values[name]\r\n if presentation:\r\n return option.presentation(value)\r\n else:\r\n return value\r\n else:\r\n raise OptionValueNotSetError(name, option)", "def get(self, section, option):\n if self._dict.has_key(section):\n return self._dict[section].get(option, None)\n return None", "def get(self, option, default=None):\n\t\treturn self._get_raw(option, '', default)", "def get_value(self, section, option):\n raise NotImplementedError()", "def optioninfo(self, option):\n return self._moptions[option]", "def getValue(self):\n return _libsbml.ConversionOption_getValue(self)", "def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]", "def get_option(self, option, default=None):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n try:\n value = self.get(section, key)\n value = self._str_to_val(value)\n except ValueError, s:\n logger.warning(\"get failed for {}/{}: {}\".format(section,key,s))\n value = default\n except NoSectionError:\n value = default\n except NoOptionError:\n value = default\n\n return value", "def __getitem__(self, option):\n if option not in self.__dict__.keys():\n raise KeyError(\"Option '{}' not found.\".format(option))\n\n return self.__dict__[option]", "def get_option(self, name):\n option_df = self.dismod_file.option\n records = option_df[option_df.option_name == name]\n if len(records) == 1:\n return records.option_value.iloc[0]\n else:\n raise KeyError(f\"Option {name} not found in options\")", "def _get_option_value(self, section, option):\n value = None\n if self.config.has_section(section) and self.config.has_option(section, option):\n value = self.appName = self.config.get(section, option)\n return value", "def getint(self, option):\n return getint(self.name, option)", "def data_option(self):\n if \"dataOption\" in self._prop_dict:\n return self._prop_dict[\"dataOption\"]\n else:\n return None", "def get_option(key: str) -> Any:\n with _config_lock:\n config_options = get_config_options()\n\n if key not in config_options:\n raise 
RuntimeError('Config key \"%s\" not defined.' % key)\n return config_options[key].value", "def get_option_value(self, name: str, prefix: bool = False) -> Any:\n\n if prefix:\n name = f\"{self.parent.pyautodoc_prefix}-{name}\"\n\n if name in self.parent.options:\n return self.parent.options[name]\n elif self.is_available(name):\n return self.get_app_cfg_by_name(name)", "def option(self, key):\n if self.integration is None:\n return None\n return self.configuration.get(f'{self.get_config_name()}.{key}')", "def get_option(self, n):\n opts = self.view.options_panel.original_widget.contents()\n return opts[n][0].original_widget.contents[1][0]", "def __getattr__(self,name):\n # Check if we are getting an option\n if name not in ['_PreferencesSection__section','_PreferencesSection__options',\n '_PreferencesSection__config','_PreferencesSection__initialised','_PreferencesSection__get_option','_PreferencesSection__set_option']:\n if not self.__options.has_key(name):\n raise AttributeError('(EVOGTK - Preferences Helper) Preferences object has no attribute \\'%s\\'' % name)\n # Get option value\n return self.get_option(name)\n else:\n # Call original __getattr__ method\n return super(_PreferencesSection,self).__getattr__(name)", "def __getitem__(self, key):\n if hasattr(self, key):\n return getattr(self, key)\n else:\n raise KeyError('No such option `{}`.'.format(key))" ]
[ "0.8096682", "0.77774155", "0.7556324", "0.75427765", "0.7527482", "0.75237143", "0.7513979", "0.7381633", "0.73176533", "0.7317182", "0.7203971", "0.7119419", "0.7093766", "0.708832", "0.70553225", "0.70469296", "0.7028326", "0.70008403", "0.6975328", "0.6930424", "0.6911173", "0.69107646", "0.68956226", "0.68658125", "0.6754924", "0.6746562", "0.67363554", "0.672906", "0.669317", "0.66832465" ]
0.7837177
1
Draws a Run the test button on the page for a user.
def Button(request): params = { 'mimetype': 'text/javascript', 'fn': request.GET.get('fn', '_bRunTest'), 'btn_text': request.GET.get('btn_text', 'Run the test'), 'cb_text': request.GET.get('cb_text', 'and send my results to Browserscope (anonymously)'), } return util.Render(request, 'user_test_button.js', params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_run_button(self):\n\n run_button = Button(\n self.master, text=\"Run\", command=self.run_simulator)\n run_button.grid(row=6, column=1)\n\n return run_button", "def click_button(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def click_button(self):\n self.q(css='div#fixture input').first.click()", "def display(self):\n\t\tprint('The button in the window was clicked!')", "def on_run_button(self, event):\n text = _(u\"Run button pressed.\")\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()\n self.run_command()", "def run_button(self):\r\n self.step = False # Clear step command\r\n self.is_pause = False\r\n self.run_command()", "def batch_test_run():\n WebDriverWait(browser, 15).until(EC.visibility_of_element_located((By.XPATH, '//button[contains(text(), ''\"Run\")]')))\n batch_run_button = browser.find_elements_by_xpath('//button[contains(text(), \"Run\")]')\n for test in batch_run_button:\n test.click()\n time.sleep(4)", "def test_create_custom_button(self):\n pass", "def click_button(self):\n self.widgets.get('button').click()", "def run_button(self):\n if self.run.label == 'Run':\n self.run.label = 'Stop'\n self.run.button_type = 'danger'\n self.callback_obj = self.doc.add_periodic_callback(self.unlocked_task, 1000)\n\n else:\n self.run.label = 'Run'\n self.run.button_type = 'success'\n self.doc.remove_periodic_callback(self.callback_obj)", "def start_test(url):\n \n Debug.user(' ################# start Test ######################')\n App.open('firefox --private-window '+url)\n wait(\"1501595436606.png\", 10)\n\n click(\"1501595453560.png\")\n\n if exists():\n \n click()\n else:\n click()\n \n\n\n if exists(\"1499781534684.png\"):\n click(\"1499781552298.png\")\n type('root')\n click(\"1499781563870.png\")\n else:\n pass\n click(\"1499781591282.png\")", "def click(cls, user, link):\r\n pass", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def click_user_widget_button(self):\n return self", "def click(cls, user, link):\n pass", "def test_update_custom_button(self):\n pass", "def testButtonCB(self, testId):\n button = self.test_buttons[testId]\n if self.result:\n self.showTestOutput(testId)\n return", "def test_login_to_youtube(self):\n\n # Finding elements on the page and actions.\n self.wait_clickable_by_css(\n \"#buttons a > .style-scope.ytd-button-renderer\"\n \".style-suggestive.size-small[role='button']\"\n ).click()\n self.wait_clickable_by_id(\"identifierId\").send_keys(config.USER1[\"email\"])\n self.wait_clickable_by_id(\"identifierNext\").click()\n self.wait_invisibility_by_id(\"identifierId\")\n self.wait_clickable_by_css(\".whsOnd.zHQkBf\").send_keys(config.USER1[\"password\"])\n self.wait_clickable_by_id(\"passwordNext\").click()\n\n try:\n self.wait_clickable_by_css(\".ZFr60d.CeoRYc\").click()\n\n except:\n\n pass\n\n # Waiting for button to appear.\n self.wait_visibility_by_css(\"#avatar-btn\")\n self.make_screenshot()\n print(\"Test 1: User is successfully logged in.\")", "def testcase1(self):\r\n\r\n self.driver.find_element_by_xpath('//*[@id=\"screenshotContainer\"]/div/div/div/div/div/div[14]').click()\r\n self.driver.find_element_by_xpath('//*[@id=\"screenshotContainer\"]/div/div/div/div/div/div[23]').is_displayed()\r\n 
self.driver.find_element_by_xpath('//*[@id=\"screenshotContainer\"]/div/div/div/div/div/div[23]').is_enabled()", "def showTestBegin(self, test):\n self.test_buttons[test.id()].setState('running')\n self.showMessage('busy', test.id())\n self.update_idletasks()\n return", "def _createTestButtonArea(self, parent):\n frame = self.createcomponent('testbuttonframe', (), None,\n Frame,\n (parent,),\n relief=SUNKEN,\n bd=2)\n #\n # Widgets to show and change the directory to scan\n #\n frame2 = self.createcomponent('scanndirectoryframe', (), None,\n Frame,\n (frame,),\n relief=FLAT,\n bd=2)\n self.cwd_label = self.createcomponent('cwdlabel', (), None,\n Label,\n (frame2,),\n textvariable=self.scan_directory,\n relief=FLAT,\n justify=LEFT,\n anchor='w',\n )\n self.cwd_label.pack(side=LEFT,\n expand=YES,\n fill=X,\n )\n btn = self.createcomponent('changescandirectorybtn', (), None,\n Button,\n (frame2,),\n command=self.changeScanDirectoryCB,\n text='Change...',\n )\n btn.pack(side=LEFT,\n expand=NO,\n fill=X,\n )\n frame2.pack(side=TOP,\n expand=YES,\n fill=X,\n )\n #\n # Create the action buttons\n #\n self.createcomponent('testcanvas', (), None,\n Pmw.ScrolledCanvas,\n (frame,),\n canvas_background=self.user_preferences['background'],\n canvasmargin=self.user_preferences['spacing'],\n usehullsize=1,\n hull_height=5,\n )\n self.canvas = self.component('testcanvas').component('canvas')\n self.idleWidgets.append(self.canvas)\n self.component('testcanvas').pack(side=TOP,\n expand=YES,\n fill=BOTH,\n )\n \n frame.pack(side=TOP,\n expand=NO,\n fill=X,\n padx=self['padx'],\n pady=self['pady'],\n )\n #\n # Create a button for each test\n #\n self.configureTestIconsFromPrefs()\n self._updateTestButtons()\n #\n # Register the variable callback so that the buttons are updated\n # automatically later. We do not do this earlier to avoid\n # recursive loops.\n #\n #self.scan_directory.trace_variable('w', self._changeScanDirectoryVariableCB)\n return", "def showTestSuccess(self, test):\n #self._setTestButtonColor(test.id(), self.SUCCESS_COLOR)\n self.test_buttons[test.id()].setState('success')\n self.update_idletasks()\n return", "def test_get_custom_button(self):\n pass", "def test_button(self):\n callback = CallbackCounter()\n display = get_display(0)\n button = FakeButton()\n display.register_onpress(button, callback)\n assert callback == 0\n display.read()\n assert callback == 0\n button.value = True\n display.read()\n assert callback == 1\n for i in range(200):\n display.read()\n assert callback == 1", "def setup_button_run(self):\n run_icon = tk.PhotoImage(file = self.run_icon)\n self.button_run = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = run_icon,\n command = self.run_world)\n self.button_run.image = run_icon\n self.button_run.grid(row = 0, column = 2, sticky = tk.W)", "def test_ProstateReporting1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n self.delayDisplay('Test passed!')", "def main():\n run_test_draw_upside_down_wall()", "def main():\r\n root = tk.Tk()\r\n root.config(bg='gray40')\r\n root.geometry('800x400')\r\n # OpenUser('Christof', lambda: print('Hallo'), 'german')\r\n inter = User_Interface(root, 'Christof')\r\n inter.UserInterface.place(x=10, y=10)\r\n\r\n root.mainloop()" ]
[ "0.63031554", "0.62987185", "0.61791044", "0.61791044", "0.6075906", "0.6057968", "0.6029089", "0.5979514", "0.58742535", "0.58558655", "0.5851815", "0.57412475", "0.57368124", "0.5710455", "0.5707332", "0.5673687", "0.5670039", "0.56390357", "0.56390077", "0.56256866", "0.5610386", "0.56084305", "0.5580948", "0.5563278", "0.5549508", "0.55438846", "0.55317885", "0.5522374", "0.54859334", "0.5482684" ]
0.73393345
0
The User Test results table.
def Table(request, key): test = models.user_test.Test.get_mem(key) if not test: msg = 'No test was found with test_key %s.' % key return http.HttpResponseServerError(msg) params = { 'hide_nav': True, 'hide_footer': True, 'test': test, } return util.GetResults(request, 'user_test_table.html', params, test.get_test_set())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tabulate(self):\n for test_name, test in self.test_types.items():\n for ivs_name, ivs in self.ivs.items():\n if self.verbose:\n print(\"{0}: {1}\".format(ivs_name, test_name))\n tree = test(ivs)\n if not tree:\n continue\n score = tree.score(True)\n if self.verbose > 1:\n tree.print_structure()\n\n self.result_matrix['ivs name'][ivs_name][test_name] = score\n self.result_matrix['test type'][test_name][ivs_name] = score", "def getTestResults():", "def create_blast_results_table(self):\n res = BLASTUtilities.BLASTResult()\n log.info(\"Creating table to store BLAST results ...\")\n fields = [\"gene_id\"] + res.fields_names\n types = [str]+res.fields_types\n self.create_table(self.BlastResultsTable,fields, types)", "def populate_table(self, username = \"\"):\n db_acces = DbMethods()\n users = db_acces.select_users(username)\n\n self.result_table.setRowCount(len(users))\n\n for i in range(len(users)):\n user = users[i]\n item_user = QTableWidgetItem(user[\"username\"])\n self.result_table.setItem(i, 0, item_user)", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def view_result_table(cv) -> pd.DataFrame:\n columns = [\"params\", \"mean_test_score\", \"std_test_score\", \"rank_test_score\"]\n return pd.DataFrame(cv.cv_results_)[columns].sort_values(by=[\"rank_test_score\"])", "def test_present_results_displays_results(self):\n # to test this we don't actually need to write to the database,\n # we just need a list of ordered_dicts in menu.records\n test_records = [\n OrderedDict([\n ('name', 'Test Employee 1'),\n ('date', datetime.date(2018, 5, 1)),\n ('task_name', 'Test Task 1'),\n ('duration', 1),\n ('notes', 'This is a note for the first test task')\n ]),\n OrderedDict([\n ('name', 'Test Employee 2'),\n ('date', datetime.date(2018, 5, 2)),\n ('task_name', 'Test Task 2'),\n ('duration', 2),\n ('notes', 'This is a note for the second test task')\n ]),\n ]\n self.menu.records = [test_records[0]]\n f_username = test_records[0]['name']\n f_date = test_records[0]['date'].strftime(\"%Y-%m-%d\")\n f_time_taken = str(test_records[0]['duration'])\n f_task_name = test_records[0]['task_name']\n f_notes = test_records[0]['notes']\n short_form = \"{}: {} ({}m): {} | {}\".format(\n f_username,\n f_date,\n f_time_taken,\n f_task_name,\n f_notes\n )\n expected_output = (\"\\nSearch Results\\n\" +\n \"1) {}\\n\".format(short_form) +\n \"\\n\" +\n \"Available actions:\\n\" +\n \"v) View detail\\n\" +\n \"e) Edit\\n\" +\n \"d) Delete\\n\" +\n \"m) go back to Main menu\\n\" +\n \"q) quit\\n\")\n\n '''The process for capturing `print()` statements and redirecting to\n an accumulating object for later processing has the following steps:\n 1. import io and sys\n 2. in the test function, create a StringIO object\n (this is a buffer object that will be the destination for the\n redirected stdout)\n ```\n captured_output = io.StringIO()\n ```\n 3. point stdout at the capture object\n ```\n sys.stdout = captured_output\n ```\n 4. Run code as normal, any print() statement will go to\n the StringIO object instead of standard out\n 5. Revert stdout (will not affect the contents of the StringIO buffer)\n ```\n sys.stdout = sys.__stdout__\n ```\n 6. Run the rest of the code. 
The contents of the StringIO buffer can\n be accessed as follows:\n ```\n captured_output.getvalue()\n ```\n '''\n # Create a StringIO object to be a capture object\n captured_output = io.StringIO()\n # point stdout at the capture object\n sys.stdout = captured_output\n # Do anything that's going to have a print statement\n # (these will be accumulated in the captured_output object)\n example_input = 'q'\n with patch('builtins.input', side_effect=example_input):\n self.menu.present_results()\n\n # Revert stdout (captured_output still holds the captured items)\n sys.stdout = sys.__stdout__\n # Do any other test code (e.g., asserts)\n self.assertEqual(expected_output, captured_output.getvalue())", "def _fetch_sample_data_from_user_query(self) -> TableData:\n rnd = self.session.execute(f\"{self._profile_sample_query}\")\n try:\n columns = [col.name for col in rnd.cursor.description]\n except AttributeError:\n columns = list(rnd.keys())\n return TableData(\n columns=columns,\n rows=[list(row) for row in rnd.fetchmany(100)],\n )", "def add_to_table(self):\n if len(self.result) == 0:\n self.result = {self.title: [self.accuracy, self.f1, self.precision]}\n self.result = pd.DataFrame(self.result, index=['Accuracy', 'F-score', 'Precision'])\n return self.result\n else:\n conact = {self.title: [self.accuracy, self.f1, self.precision]}\n conact = pd.DataFrame(conact, index=['Accuracy', 'F-score', 'Precision'])\n self.result = pd.concat([self.result, conact], axis=1)\n return self.result", "def test_get_total_users_get(self):\n pass", "def statistics_on_test(self, predicted_results, result):\n # Print confusion matrix and mean average precision score\n predicted_results_binary = self.predicted_results_to_binary(predicted_results)\n print(\"\\nConfusion matrix : \")\n print(confusion_matrix(result, predicted_results_binary))\n print(\"\\nAverage precision score : \", average_precision_score(result, predicted_results_binary))", "def test_get_results(self):\n pass", "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return 
output", "def results(self):\n pass", "def _generate_report(self):\n total_duration = 0.0\n total_nb_tests = 0\n total_nb_success = 0\n nb_modules = 0\n payload = []\n\n res_table = prettytable.PrettyTable(\n padding_width=2,\n field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])\n res_table.align['Module'] = \"l\"\n res_table.align['Duration'] = \"r\"\n res_table.align['Success'] = \"r\"\n\n # for each scenario we draw a row for the table\n for item in self.summary:\n if item['task_status'] is True:\n nb_modules += 1\n total_duration += item['overall_duration']\n total_nb_tests += item['nb_tests']\n total_nb_success += item['nb_success']\n try:\n success_avg = 100 * item['nb_success'] / item['nb_tests']\n except ZeroDivisionError:\n success_avg = 0\n success_str = f\"{success_avg:0.2f}%\"\n duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(item['overall_duration']))\n res_table.add_row([item['test_name'], duration_str,\n item['nb_tests'], success_str])\n payload.append({'module': item['test_name'],\n 'details': {'duration': item['overall_duration'],\n 'nb tests': item['nb_tests'],\n 'success rate': success_str,\n 'success': item['success'],\n 'failures': item['failures']}})\n\n total_duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(total_duration))\n try:\n self.result = 100 * total_nb_success / total_nb_tests\n except ZeroDivisionError:\n self.result = 100\n success_rate = f\"{self.result:0.2f}\"\n success_rate_str = str(success_rate) + '%'\n res_table.add_row([\"\", \"\", \"\", \"\"])\n res_table.add_row([\"TOTAL:\", total_duration_str, total_nb_tests,\n success_rate_str])\n\n LOGGER.info(\"Rally Summary Report:\\n\\n%s\\n\", res_table.get_string())\n LOGGER.info(\"Rally '%s' success_rate is %s%% in %s/%s modules\",\n self.case_name, success_rate, nb_modules,\n len(self.summary))\n self.details['summary'] = {'duration': total_duration,\n 'nb tests': total_nb_tests,\n 'nb success': success_rate}\n self.details[\"modules\"] = payload", "def generate_table(results):\n keyslist = list(results[0].keys())\n table = PrettyTable(keyslist)\n for dct in results:\n table.add_row([dct.get(c, \"\") for c in keyslist])\n return table", "def getResults():", "def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table", "def html_table(self,relpath=None):\n tbl = Table(('module','status'),\n module='FastQC test',status='Outcome')\n tbl.add_css_classes('fastqc_summary','summary')\n for name in self.modules:\n tbl.add_row(module=Link(name,self.link_to_module(name,\n relpath=relpath)),\n status=\"<span class='%s'>%s</span>\" % (\n self.status(name),\n self.status(name)))\n return tbl.html()", "def populate_table(self, table: Table, name=None) -> None:\n new_table = Table()\n\n if name is None:\n name = self.returns_tms.name\n\n new_table.set_column_names([\"Statistic\", name])\n for item in self._get_results_list():\n row_name = item[1] + \" [\" + item[3] + \"]\"\n if item[3] == '':\n row_name = item[1]\n\n new_table.add_row([row_name, Table.Cell(item[2])])\n\n if len(table.rows) != 0:\n new_table = table.combine(new_table)\n\n table.set_column_names(new_table.get_column_names())\n table.rows = new_table.rows", "def get_user_results_from_db(self):\n results = 
axdb_client.get_approval_results(leaf_id=self.leaf_id)\n return results", "def get_result_table_and_info(cls):\n winning_dict = cls.get_winning_topics()\n winning_topics = winning_dict['winning_topics']\n runoff_poll_warning = winning_dict['runoff_poll_warning']\n\n # Create table\n result_table = []\n all_categories = sorted(Category.objects.all(), key=attrgetter('sum_of_votes', 'weight'), reverse=True)\n for category in all_categories:\n category_hoechstzahls = filter(lambda hoechstzahl: hoechstzahl.topic.category == category, cls.all_hoechstzahls)\n category_hoechstzahls.sort(key=lambda hoechstzahl: hoechstzahl.value, reverse=True)\n runoff_poll_warning = second_runoff_poll_check(runoff_poll_warning, category_hoechstzahls, winning_topics)\n category_hoechstzahls += (max(config['openslides_topicvoting_posts'], 3) - len(category_hoechstzahls)) * [None]\n result_table.append(category_hoechstzahls)\n\n # Return table and flags as dictionary\n return {'result_table': result_table,\n 'winning_topics': winning_topics,\n 'runoff_poll_warning': runoff_poll_warning,\n 'topic_post_warning': winning_dict['topic_post_warning']}", "def get_results(self):\n error_dict = {'error_code_test': self.error_code_test,\n 'error_text_test': self.error_text_test}\n\n return self.testresults, error_dict, self.checkstats", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def results(self):\r\n pass", "def evaluate_all_results(self, nbr_items: int = -1, val_size: float = 0.2, n: int = 3) -> pd.DataFrame:\n results = list(map(\n lambda x: self.evaluate_one_user(x, val_size, n),\n self.database.users.data['user_id'].tolist()[:nbr_items]\n ))\n return pd.DataFrame.from_records(results)", "def users(accountable, query):\n users = accountable.users(query)\n headers = ['display_name', 'key']\n if users:\n rows = [[v for k, v in sorted(u.items()) if k in headers]\n for u in users]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho('No users found for query {}'.format(\n query\n ), fg='red')", "def usage_table_format(result):\n table = []\n for item in result:\n row = OrderedDict()\n row['Value'] = item['name']['localizedValue']\n row['Usage'] = item['currentValue'] or \"0\"\n row['Limit'] = item['limit'] or \"0\"\n table.append(row)\n return table", "def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)", "def test_get_result_histogram(self):\n pass" ]
[ "0.6465429", "0.6332018", "0.61088157", "0.6088008", "0.60849124", "0.6081138", "0.6075006", "0.60503936", "0.5999182", "0.5970002", "0.59527034", "0.58797216", "0.5863282", "0.5861191", "0.5843431", "0.5839", "0.5837997", "0.58336246", "0.5829603", "0.5828688", "0.58000195", "0.57811207", "0.5778799", "0.57697433", "0.5760748", "0.57580286", "0.5743248", "0.57324785", "0.5714009", "0.5700144" ]
0.728486
0